From 2b14743f6eda2428a1c930efa2ae5f2c33b6229706c1046a443c206e800498e3 Mon Sep 17 00:00:00 2001
From: Aleksa Sarai
Date: Wed, 31 Jul 2024 05:49:49 +0000
Subject: [PATCH] Accepting request 1190567 from home:cyphar:docker

- Update to Docker 26.1.5-ce. See upstream changelog online at
- This update includes a fix for CVE-2024-41110. bsc#1228324
- Rebase patches:
  * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
  * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
  * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
  * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
  * 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch
  * 0006-bsc1221916-update-to-patched-buildkit-version-to-fix.patch
  * 0007-bsc1214855-volume-use-AtomicWriteFile-to-save-volume.patch
  * cli-0001-docs-include-required-tools-in-source-tree.patch

OBS-URL: https://build.opensuse.org/request/show/1190567
OBS-URL: https://build.opensuse.org/package/show/Virtualization:containers/docker?expand=0&rev=406
---
 .gitattributes                                |    23 +
 .gitignore                                    |     1 +
 ...llow-directory-creation-in-run-secre.patch |    73 +
 ...USE-implement-SUSE-container-secrets.patch |   460 +
 ...rt-graphdriver-btrfs-use-kernel-UAPI.patch |    46 +
 ...mor-clobber-docker-default-profile-o.patch |    89 +
 ...armor-remove-version-conditionals-fr.patch |   326 +
 ...e-to-patched-buildkit-version-to-fix.patch |   890 +
 ...e-use-AtomicWriteFile-to-save-volume.patch |    53 +
 80-docker.rules                               |     5 +
 README_SUSE.md                                |   230 +
 _service                                      |    22 +
 ...nclude-required-tools-in-source-tree.patch | 23382 ++++++++++++++++
 docker-26.1.4_ce_de5c9cf0b96e.tar.xz          |     3 +
 docker-26.1.5_ce_411e817ddf71.tar.xz          |     3 +
 docker-audit.rules                            |    27 +
 docker-cli-26.1.4_ce.tar.xz                   |     3 +
 docker-cli-26.1.5_ce.tar.xz                   |     3 +
 docker-daemon.json                            |     8 +
 docker-rpmlintrc                              |     2 +
 docker.changes                                |  4132 +++
 docker.service                                |    45 +
 docker.socket                                 |    18 +
 docker.spec                                   |   417 +
 docker.sysusers                               |     3 +
 sysconfig.docker                              |     8 +
 26 files changed, 30272 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 .gitignore
 create mode 100644 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
 create mode 100644 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
 create mode 100644 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
 create mode 100644 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
 create mode 100644 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch
 create mode 100644 0006-bsc1221916-update-to-patched-buildkit-version-to-fix.patch
 create mode 100644 0007-bsc1214855-volume-use-AtomicWriteFile-to-save-volume.patch
 create mode 100644 80-docker.rules
 create mode 100644 README_SUSE.md
 create mode 100644 _service
 create mode 100644 cli-0001-docs-include-required-tools-in-source-tree.patch
 create mode 100644 docker-26.1.4_ce_de5c9cf0b96e.tar.xz
 create mode 100644 docker-26.1.5_ce_411e817ddf71.tar.xz
 create mode 100644 docker-audit.rules
 create mode 100644 docker-cli-26.1.4_ce.tar.xz
 create mode 100644 docker-cli-26.1.5_ce.tar.xz
 create mode 100644 docker-daemon.json
 create mode 100644 docker-rpmlintrc
 create mode 100644 docker.changes
 create mode 100644 docker.service
 create mode 100644 docker.socket
 create mode 100644 docker.spec
 create mode 100644 docker.sysusers
 create mode 100644 sysconfig.docker

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..9b03811
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,23 @@
+## Default LFS
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.bsp filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.gem filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.jar filter=lfs diff=lfs merge=lfs -text
+*.lz filter=lfs diff=lfs merge=lfs -text
+*.lzma filter=lfs diff=lfs merge=lfs -text
+*.obscpio filter=lfs diff=lfs merge=lfs -text
+*.oxt filter=lfs diff=lfs merge=lfs -text
+*.pdf filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.rpm filter=lfs diff=lfs merge=lfs -text
+*.tbz filter=lfs diff=lfs merge=lfs -text
+*.tbz2 filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.ttf filter=lfs diff=lfs merge=lfs -text
+*.txz filter=lfs diff=lfs merge=lfs -text
+*.whl filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..57affb6
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+.osc
diff --git a/0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch b/0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
new file mode 100644
index 0000000..1e84c9a
--- /dev/null
+++ b/0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
@@ -0,0 +1,73 @@
+From ec53ee338835c4c1dc583695ac166f36bf3bac5c Mon Sep 17 00:00:00 2001
+From: Aleksa Sarai
+Date: Wed, 8 Mar 2017 12:41:54 +1100
+Subject: [PATCH 1/7] SECRETS: daemon: allow directory creation in /run/secrets
+
+Since FileMode can have the directory bit set, allow a SecretStore
+implementation to return secrets that are actually directories. This is
+useful for creating directories and subdirectories of secrets.
+
+Signed-off-by: Antonio Murdaca
+Signed-off-by: Aleksa Sarai
+---
+ daemon/container_operations_unix.go | 23 ++++++++++++++++++++---
+ 1 file changed, 20 insertions(+), 3 deletions(-)
+
+diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go
+index 4dedc1b21c87..b7c310493e79 100644
+--- a/daemon/container_operations_unix.go
++++ b/daemon/container_operations_unix.go
+@@ -3,6 +3,7 @@
+ package daemon // import "github.com/docker/docker/daemon"
+ 
+ import (
++    "bytes"
+     "context"
+     "fmt"
+     "os"
+@@ -16,6 +17,7 @@ import (
+     "github.com/docker/docker/daemon/links"
+     "github.com/docker/docker/errdefs"
+     "github.com/docker/docker/libnetwork"
++    "github.com/docker/docker/pkg/archive"
+     "github.com/docker/docker/pkg/idtools"
+     "github.com/docker/docker/pkg/process"
+     "github.com/docker/docker/pkg/stringid"
+@@ -240,9 +242,6 @@ func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
+     if err != nil {
+         return errors.Wrap(err, "unable to get secret from secret store")
+     }
+-    if err := os.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil {
+-        return errors.Wrap(err, "error injecting secret")
+-    }
+ 
+     uid, err := strconv.Atoi(s.File.UID)
+     if err != nil {
+@@ -253,6 +252,24 @@
+         return err
+     }
+ 
++    if s.File.Mode.IsDir() {
++        if err := os.Mkdir(fPath, s.File.Mode); err != nil {
++            return errors.Wrap(err, "error creating secretdir")
++        }
++        if secret.Spec.Data != nil {
++            // If the "file" is a directory, then s.File.Data is actually a tar
++            // archive of the directory. So we just do a tar extraction here.
++            if err := archive.UntarUncompressed(bytes.NewBuffer(secret.Spec.Data), fPath, &archive.TarOptions{
++                IDMap: daemon.idMapping,
++            }); err != nil {
++                return errors.Wrap(err, "error injecting secretdir")
++            }
++        }
++    } else {
++        if err := os.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil {
++            return errors.Wrap(err, "error injecting secret")
++        }
++    }
+     if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil {
+         return errors.Wrap(err, "error setting ownership for secret")
+     }
+--
+2.45.2
+
diff --git a/0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch b/0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
new file mode 100644
index 0000000..47d4964
--- /dev/null
+++ b/0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
@@ -0,0 +1,460 @@
+From 759482e941bde2b67d39b52c803e3390555ff9e9 Mon Sep 17 00:00:00 2001
+From: Aleksa Sarai
+Date: Wed, 8 Mar 2017 11:43:29 +1100
+Subject: [PATCH 2/7] SECRETS: SUSE: implement SUSE container secrets
+
+This allows for us to pass in host credentials to a container, allowing
+for SUSEConnect to work with containers.
+
+THIS PATCH IS NOT TO BE UPSTREAMED, DUE TO THE FACT THAT IT IS
+SUSE-SPECIFIC, AND UPSTREAM DOES NOT APPROVE OF THIS CONCEPT BECAUSE IT
+MAKES BUILDS NOT ENTIRELY REPRODUCIBLE.
+
+SUSE-Bugs: bsc#1065609 bsc#1057743 bsc#1055676 bsc#1030702
+Signed-off-by: Aleksa Sarai
+---
+ daemon/start.go        |   5 +
+ daemon/suse_secrets.go | 415 +++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 420 insertions(+)
+ create mode 100644 daemon/suse_secrets.go
+
+diff --git a/daemon/start.go b/daemon/start.go
+index b967947af2ce..09e79e410310 100644
+--- a/daemon/start.go
++++ b/daemon/start.go
+@@ -123,6 +123,11 @@ func (daemon *Daemon) containerStart(ctx context.Context, daemonCfg *configStore
+         return err
+     }
+ 
++    // SUSE:secrets -- inject the SUSE secret store
++    if err := daemon.injectSuseSecretStore(container); err != nil {
++        return err
++    }
++
+     m, cleanup, err := daemon.setupMounts(ctx, container)
+     if err != nil {
+         return err
+diff --git a/daemon/suse_secrets.go b/daemon/suse_secrets.go
+new file mode 100644
+index 000000000000..32b0ece91b59
+--- /dev/null
++++ b/daemon/suse_secrets.go
+@@ -0,0 +1,415 @@
++/*
++ * suse-secrets: patch for Docker to implement SUSE secrets
++ * Copyright (C) 2017-2021 SUSE LLC.
++ *
++ * Licensed under the Apache License, Version 2.0 (the "License");
++ * you may not use this file except in compliance with the License.
++ * You may obtain a copy of the License at
++ *
++ *    http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package daemon
++
++import (
++    "archive/tar"
++    "bytes"
++    "fmt"
++    "io"
++    "io/ioutil"
++    "os"
++    "path/filepath"
++    "strings"
++    "syscall"
++
++    "github.com/docker/docker/container"
++    "github.com/docker/docker/pkg/archive"
++    "github.com/docker/docker/pkg/idtools"
++
++    swarmtypes "github.com/docker/docker/api/types/swarm"
++    swarmexec "github.com/moby/swarmkit/v2/agent/exec"
++    swarmapi "github.com/moby/swarmkit/v2/api"
++
++    "github.com/opencontainers/go-digest"
++    "github.com/sirupsen/logrus"
++)
++
++func init() {
++    // Output to tell us in logs that SUSE:secrets is enabled.
++ logrus.Infof("SUSE:secrets :: enabled") ++} ++ ++// Creating a fake file. ++type SuseFakeFile struct { ++ Path string ++ Uid int ++ Gid int ++ Mode os.FileMode ++ Data []byte ++} ++ ++func (s SuseFakeFile) id() string { ++ // NOTE: It is _very_ important that this string always has a prefix of ++ // "suse". This is how we can ensure that we can operate on ++ // SecretReferences with a confidence that it was made by us. ++ return fmt.Sprintf("suse_%s_%s", digest.FromBytes(s.Data).Hex(), s.Path) ++} ++ ++func (s SuseFakeFile) toSecret() *swarmapi.Secret { ++ return &swarmapi.Secret{ ++ ID: s.id(), ++ Internal: true, ++ Spec: swarmapi.SecretSpec{ ++ Data: s.Data, ++ }, ++ } ++} ++ ++func (s SuseFakeFile) toSecretReference(idMaps idtools.IdentityMapping) *swarmtypes.SecretReference { ++ // Figure out the host-facing {uid,gid} based on the provided maps. Fall ++ // back to root if the UID/GID don't match (we are guaranteed that root is ++ // mapped). ++ ctrUser := idtools.Identity{UID: s.Uid, GID: s.Gid} ++ hostUser := idMaps.RootPair() ++ if user, err := idMaps.ToHost(ctrUser); err == nil { ++ hostUser = user ++ } ++ ++ // Return the secret reference as a file target. ++ return &swarmtypes.SecretReference{ ++ SecretID: s.id(), ++ SecretName: s.id(), ++ File: &swarmtypes.SecretReferenceFileTarget{ ++ Name: s.Path, ++ UID: fmt.Sprintf("%d", hostUser.UID), ++ GID: fmt.Sprintf("%d", hostUser.GID), ++ Mode: s.Mode, ++ }, ++ } ++} ++ ++// readDir will recurse into a directory prefix/dir, and return the set of ++// secrets in that directory (as a tar archive that is packed inside the "data" ++// field). The Path attribute of each has the prefix stripped. Symlinks are ++// dereferenced. ++func readDir(prefix, dir string) ([]*SuseFakeFile, error) { ++ var suseFiles []*SuseFakeFile ++ ++ path := filepath.Join(prefix, dir) ++ fi, err := os.Stat(path) ++ if err != nil { ++ // Ignore missing files. ++ if os.IsNotExist(err) { ++ // If the path itself exists it was a dangling symlink so give a ++ // warning about the symlink dangling. ++ _, err2 := os.Lstat(path) ++ if !os.IsNotExist(err2) { ++ logrus.Warnf("SUSE:secrets :: ignoring dangling symlink: %s", path) ++ } ++ return nil, nil ++ } ++ return nil, err ++ } else if !fi.IsDir() { ++ // Just to be safe. ++ logrus.Infof("SUSE:secrets :: expected %q to be a directory, but was a file", path) ++ return readFile(prefix, dir) ++ } ++ path, err = filepath.EvalSymlinks(path) ++ if err != nil { ++ return nil, err ++ } ++ ++ // Construct a tar archive of the source directory. We tar up the prefix ++ // directory and add dir as an IncludeFiles specifically so that we ++ // preserve the name of the directory itself. ++ tarStream, err := archive.TarWithOptions(path, &archive.TarOptions{ ++ Compression: archive.Uncompressed, ++ IncludeSourceDir: true, ++ }) ++ if err != nil { ++ return nil, fmt.Errorf("SUSE:secrets :: failed to tar source directory %q: %v", path, err) ++ } ++ tarStreamBytes, err := ioutil.ReadAll(tarStream) ++ if err != nil { ++ return nil, fmt.Errorf("SUSE:secrets :: failed to read full tar archive: %v", err) ++ } ++ ++ // Get a list of the symlinks in the tar archive. 
++    var symlinks []string
++    tmpTr := tar.NewReader(bytes.NewBuffer(tarStreamBytes))
++    for {
++        hdr, err := tmpTr.Next()
++        if err == io.EOF {
++            break
++        }
++        if err != nil {
++            return nil, fmt.Errorf("SUSE:secrets :: failed to read through tar reader: %v", err)
++        }
++        if hdr.Typeflag == tar.TypeSymlink {
++            symlinks = append(symlinks, hdr.Name)
++        }
++    }
++
++    // Symlinks aren't dereferenced in the above archive, so we explicitly do a
++    // rewrite of the tar archive to include all symlinks to files. We cannot
++    // do directories here, but lower-level directory symlinks aren't supported
++    // by zypper so this isn't an issue.
++    symlinkModifyMap := map[string]archive.TarModifierFunc{}
++    for _, sym := range symlinks {
++        logrus.Debugf("SUSE:secrets: archive(%q) %q is a need-to-rewrite symlink", path, sym)
++        symlinkModifyMap[sym] = func(tarPath string, hdr *tar.Header, r io.Reader) (*tar.Header, []byte, error) {
++            logrus.Debugf("SUSE:secrets: archive(%q) mapping for symlink %q", path, tarPath)
++            tarFullPath := filepath.Join(path, tarPath)
++
++            // Get a copy of the original byte stream.
++            oldContent, err := ioutil.ReadAll(r)
++            if err != nil {
++                return nil, nil, fmt.Errorf("suse_rewrite: failed to read archive entry %q: %v", tarPath, err)
++            }
++
++            // Check that the file actually exists.
++            fi, err := os.Stat(tarFullPath)
++            if err != nil {
++                logrus.Warnf("suse_rewrite: failed to stat archive entry %q: %v", tarFullPath, err)
++                return hdr, oldContent, nil
++            }
++
++            // Read the actual contents.
++            content, err := ioutil.ReadFile(tarFullPath)
++            if err != nil {
++                logrus.Warnf("suse_rewrite: failed to read %q: %v", tarFullPath, err)
++                return hdr, oldContent, nil
++            }
++
++            newHdr, err := tar.FileInfoHeader(fi, "")
++            if err != nil {
++                // Fake the header.
++                newHdr = &tar.Header{
++                    Typeflag: tar.TypeReg,
++                    Mode:     0644,
++                }
++            }
++
++            // Update the key fields.
++            hdr.Typeflag = newHdr.Typeflag
++            hdr.Mode = newHdr.Mode
++            hdr.Linkname = ""
++            return hdr, content, nil
++        }
++    }
++
++    // Create the rewritten tar stream.
++    tarStream = archive.ReplaceFileTarWrapper(ioutil.NopCloser(bytes.NewBuffer(tarStreamBytes)), symlinkModifyMap)
++    tarStreamBytes, err = ioutil.ReadAll(tarStream)
++    if err != nil {
++        return nil, fmt.Errorf("SUSE:secrets :: failed to read rewritten archive: %v", err)
++    }
++
++    // Add the tar stream as a "file".
++    suseFiles = append(suseFiles, &SuseFakeFile{
++        Path: dir,
++        Mode: fi.Mode(),
++        Data: tarStreamBytes,
++    })
++    return suseFiles, nil
++}
++
++// readFile returns a secret given a file under a given prefix.
++func readFile(prefix, file string) ([]*SuseFakeFile, error) {
++    path := filepath.Join(prefix, file)
++    fi, err := os.Stat(path)
++    if err != nil {
++        // Ignore missing files.
++        if os.IsNotExist(err) {
++            // If the path itself exists it was a dangling symlink so give a
++            // warning about the symlink dangling.
++            _, err2 := os.Lstat(path)
++            if !os.IsNotExist(err2) {
++                logrus.Warnf("SUSE:secrets :: ignoring dangling symlink: %s", path)
++            }
++            return nil, nil
++        }
++        return nil, err
++    } else if fi.IsDir() {
++        // Just to be safe.
++ logrus.Infof("SUSE:secrets :: expected %q to be a file, but was a directory", path) ++ return readDir(prefix, file) ++ } ++ ++ var uid, gid int ++ if stat, ok := fi.Sys().(*syscall.Stat_t); ok { ++ uid, gid = int(stat.Uid), int(stat.Gid) ++ } else { ++ logrus.Warnf("SUSE:secrets :: failed to cast file stat_t: defaulting to owned by root:root: %s", path) ++ uid, gid = 0, 0 ++ } ++ ++ bytes, err := ioutil.ReadFile(path) ++ if err != nil { ++ return nil, err ++ } ++ ++ var suseFiles []*SuseFakeFile ++ suseFiles = append(suseFiles, &SuseFakeFile{ ++ Path: file, ++ Uid: uid, ++ Gid: gid, ++ Mode: fi.Mode(), ++ Data: bytes, ++ }) ++ return suseFiles, nil ++} ++ ++// getHostSuseSecretData returns the list of SuseFakeFiles the need to be added ++// as SUSE secrets. ++func getHostSuseSecretData() ([]*SuseFakeFile, error) { ++ secrets := []*SuseFakeFile{} ++ ++ credentials, err := readDir("/etc/zypp", "credentials.d") ++ if err != nil { ++ if os.IsNotExist(err) { ++ credentials = []*SuseFakeFile{} ++ } else { ++ logrus.Errorf("SUSE:secrets :: error while reading zypp credentials: %s", err) ++ return nil, err ++ } ++ } ++ secrets = append(secrets, credentials...) ++ ++ suseConnect, err := readFile("/etc", "SUSEConnect") ++ if err != nil { ++ if os.IsNotExist(err) { ++ suseConnect = []*SuseFakeFile{} ++ } else { ++ logrus.Errorf("SUSE:secrets :: error while reading /etc/SUSEConnect: %s", err) ++ return nil, err ++ } ++ } ++ secrets = append(secrets, suseConnect...) ++ ++ return secrets, nil ++} ++ ++// To fake an empty store, in the case where we are operating on a container ++// that was created pre-swarmkit. Otherwise segfaults and other fun things ++// happen. See bsc#1057743. ++type ( ++ suseEmptyStore struct{} ++ suseEmptySecret struct{} ++ suseEmptyConfig struct{} ++ suseEmptyVolume struct{} ++) ++ ++// In order to reduce the amount of code touched outside of this file, we ++// implement the swarm API for DependencyGetter. This asserts that this ++// requirement will always be matched. In addition, for the case of the *empty* ++// getters this reduces memory usage by having a global instance. ++var ( ++ _ swarmexec.DependencyGetter = &suseDependencyStore{} ++ emptyStore swarmexec.DependencyGetter = suseEmptyStore{} ++ emptySecret swarmexec.SecretGetter = suseEmptySecret{} ++ emptyConfig swarmexec.ConfigGetter = suseEmptyConfig{} ++ emptyVolume swarmexec.VolumeGetter = suseEmptyVolume{} ++) ++ ++var errSuseEmptyStore = fmt.Errorf("SUSE:secrets :: tried to get a resource from empty store [this is a bug]") ++ ++func (_ suseEmptyConfig) Get(_ string) (*swarmapi.Config, error) { return nil, errSuseEmptyStore } ++func (_ suseEmptySecret) Get(_ string) (*swarmapi.Secret, error) { return nil, errSuseEmptyStore } ++func (_ suseEmptyVolume) Get(_ string) (string, error) { return "", errSuseEmptyStore } ++func (_ suseEmptyStore) Secrets() swarmexec.SecretGetter { return emptySecret } ++func (_ suseEmptyStore) Configs() swarmexec.ConfigGetter { return emptyConfig } ++func (_ suseEmptyStore) Volumes() swarmexec.VolumeGetter { return emptyVolume } ++ ++type suseDependencyStore struct { ++ dfl swarmexec.DependencyGetter ++ secrets map[string]*swarmapi.Secret ++} ++ ++// The following are effectively dumb wrappers that return ourselves, or the ++// default. 
++func (s *suseDependencyStore) Secrets() swarmexec.SecretGetter { return s }
++func (s *suseDependencyStore) Volumes() swarmexec.VolumeGetter { return emptyVolume }
++func (s *suseDependencyStore) Configs() swarmexec.ConfigGetter { return s.dfl.Configs() }
++
++// Get overrides the underlying DependencyGetter with our own secrets (falling
++// through to the underlying DependencyGetter if the secret isn't present).
++func (s *suseDependencyStore) Get(id string) (*swarmapi.Secret, error) {
++    logrus.Debugf("SUSE:secrets :: id=%s requested from suseDependencyGetter", id)
++
++    secret, ok := s.secrets[id]
++    if !ok {
++        // fallthrough
++        return s.dfl.Secrets().Get(id)
++    }
++    return secret, nil
++}
++
++// removeSuseSecrets removes any SecretReferences which were added by us
++// explicitly (this is detected by checking that the prefix has a 'suse'
++// prefix). See bsc#1057743.
++func removeSuseSecrets(c *container.Container) {
++    var without []*swarmtypes.SecretReference
++    for _, secret := range c.SecretReferences {
++        if strings.HasPrefix(secret.SecretID, "suse") {
++            logrus.Warnf("SUSE:secrets :: removing 'old' suse secret %q from container %q", secret.SecretID, c.ID)
++            continue
++        }
++        without = append(without, secret)
++    }
++    c.SecretReferences = without
++}
++
++func (daemon *Daemon) injectSuseSecretStore(c *container.Container) error {
++    newDependencyStore := &suseDependencyStore{
++        dfl:     c.DependencyStore,
++        secrets: make(map[string]*swarmapi.Secret),
++    }
++    // Handle old containers. See bsc#1057743.
++    if newDependencyStore.dfl == nil {
++        newDependencyStore.dfl = emptyStore
++    }
++
++    // We drop any "old" SUSE secrets, as it appears that old containers (when
++    // restarted) could still have references to old secrets. The .id() of all
++    // secrets have a prefix of "suse" so this is much easier. See bsc#1057743
++    // for details on why this could cause issues.
++    removeSuseSecrets(c)
++
++    secrets, err := getHostSuseSecretData()
++    if err != nil {
++        return err
++    }
++
++    idMaps := daemon.idMapping
++    for _, secret := range secrets {
++        newDependencyStore.secrets[secret.id()] = secret.toSecret()
++        c.SecretReferences = append(c.SecretReferences, secret.toSecretReference(idMaps))
++    }
++
++    c.DependencyStore = newDependencyStore
++
++    // bsc#1057743 -- In older versions of Docker we added volumes explicitly
++    // to the mount list. This causes clashes because of duplicate namespaces.
++    // If we see an existing mount that will clash with the in-built secrets
++    // mount we assume it's our fault.
++    intendedMounts, err := c.SecretMounts()
++    if err != nil {
++        logrus.Warnf("SUSE:secrets :: fetching old secret mounts: %v", err)
++        return err
++    }
++    for _, intendedMount := range intendedMounts {
++        mountPath := intendedMount.Destination
++        if volume, ok := c.MountPoints[mountPath]; ok {
++            logrus.Debugf("SUSE:secrets :: removing pre-existing %q mount: %#v", mountPath, volume)
++            delete(c.MountPoints, mountPath)
++        }
++    }
++    return nil
++}
+--
+2.45.2
+
diff --git a/0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch b/0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
new file mode 100644
index 0000000..e42a6ca
--- /dev/null
+++ b/0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
@@ -0,0 +1,46 @@
+From 983a57fd37dc8e42e9c4e4dfc72eb346a4385948 Mon Sep 17 00:00:00 2001
+From: Aleksa Sarai
+Date: Mon, 22 May 2023 15:44:54 +1000
+Subject: [PATCH 3/7] BUILD: SLE12: revert "graphdriver/btrfs: use kernel UAPI
+ headers"
+
+This reverts commit 3208dcabdc8997340b255f5b880fef4e3f54580d.
+
+On SLE 12, our UAPI headers are too old, resulting in us being unable to
+build the btrfs driver with the new headers. This patch is only needed
+for SLE-12.
+
+Signed-off-by: Aleksa Sarai
+---
+ daemon/graphdriver/btrfs/btrfs.go | 13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go
+index 6aaa33cf7622..7264d4036427 100644
+--- a/daemon/graphdriver/btrfs/btrfs.go
++++ b/daemon/graphdriver/btrfs/btrfs.go
+@@ -4,17 +4,12 @@ package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs"
+ 
+ /*
+ #include
+-#include
+ #include
+ 
+-#include
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)
+-    #error "Headers from kernel >= 4.12 are required to build with Btrfs support."
+-    #error "HINT: Set 'DOCKER_BUILDTAGS=exclude_graphdriver_btrfs' to build without Btrfs."
+-#endif
+-
+-#include
+-#include
++// keep struct field name compatible with btrfs-progs < 6.1.
++#define max_referenced max_rfer
++#include
++#include
+ 
+ static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
+     snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
+--
+2.45.2
+
diff --git a/0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch b/0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
new file mode 100644
index 0000000..8be8fd2
--- /dev/null
+++ b/0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
@@ -0,0 +1,89 @@
+From 8829bb8ec53399fd41dd6f46e2bad64e773e8eaa Mon Sep 17 00:00:00 2001
+From: Aleksa Sarai
+Date: Fri, 29 Jun 2018 17:59:30 +1000
+Subject: [PATCH 4/7] bsc1073877: apparmor: clobber docker-default profile on
+ start
+
+In the process of making docker-default reloading far less expensive,
+567ef8e7858c ("daemon: switch to 'ensure' workflow for AppArmor
+profiles") mistakenly made the initial profile load at dockerd start-up
+lazy. As a result, if you have a running Docker daemon and upgrade it to
+a new one with an updated AppArmor profile the new profile will not take
+effect (because the old one is still loaded). The fix for this is quite
+trivial, and just requires us to clobber the profile on start-up.
+
+Fixes: 567ef8e7858c ("daemon: switch to 'ensure' workflow for AppArmor profiles")
+SUSE-Bugs: bsc#1099277
+Signed-off-by: Aleksa Sarai
+---
+ daemon/apparmor_default.go             | 14 ++++++++++----
+ daemon/apparmor_default_unsupported.go |  4 ++++
+ daemon/daemon.go                       |  5 +++--
+ 3 files changed, 17 insertions(+), 6 deletions(-)
+
+diff --git a/daemon/apparmor_default.go b/daemon/apparmor_default.go
+index 81e10b6cbec0..e695667a190f 100644
+--- a/daemon/apparmor_default.go
++++ b/daemon/apparmor_default.go
+@@ -23,6 +23,15 @@ func DefaultApparmorProfile() string {
+     return ""
+ }
+ 
++func clobberDefaultAppArmorProfile() error {
++    if apparmor.HostSupports() {
++        if err := aaprofile.InstallDefault(defaultAppArmorProfile); err != nil {
++            return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultAppArmorProfile, err)
++        }
++    }
++    return nil
++}
++
+ func ensureDefaultAppArmorProfile() error {
+     if apparmor.HostSupports() {
+         loaded, err := aaprofile.IsLoaded(defaultAppArmorProfile)
+@@ -36,10 +45,7 @@ func ensureDefaultAppArmorProfile() error {
+         }
+ 
+         // Load the profile.
+-        if err := aaprofile.InstallDefault(defaultAppArmorProfile); err != nil {
+-            return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultAppArmorProfile, err)
+-        }
++        return clobberDefaultAppArmorProfile()
+     }
+-
+     return nil
+ }
+diff --git a/daemon/apparmor_default_unsupported.go b/daemon/apparmor_default_unsupported.go
+index be4938f5b61a..2b326fea5829 100644
+--- a/daemon/apparmor_default_unsupported.go
++++ b/daemon/apparmor_default_unsupported.go
+@@ -2,6 +2,10 @@
+ 
+ package daemon // import "github.com/docker/docker/daemon"
+ 
++func clobberDefaultAppArmorProfile() error {
++    return nil
++}
++
+ func ensureDefaultAppArmorProfile() error {
+     return nil
+ }
+diff --git a/daemon/daemon.go b/daemon/daemon.go
+index e7ca77d8cbfc..13b39538fb00 100644
+--- a/daemon/daemon.go
++++ b/daemon/daemon.go
+@@ -916,8 +916,9 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
+         log.G(ctx).Warnf("Failed to configure golang's threads limit: %v", err)
+     }
+ 
+-    // ensureDefaultAppArmorProfile does nothing if apparmor is disabled
+-    if err := ensureDefaultAppArmorProfile(); err != nil {
++    // Make sure we clobber any pre-existing docker-default profile to ensure
++    // that upgrades to the profile actually work smoothly.
++    if err := clobberDefaultAppArmorProfile(); err != nil {
+         log.G(ctx).Errorf(err.Error())
+     }
+ 
+--
+2.45.2
+
diff --git a/0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch b/0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch
new file mode 100644
index 0000000..0256155
--- /dev/null
+++ b/0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch
@@ -0,0 +1,326 @@
+From 24173cd6a2643e5e680e84920864f42ed43b6f28 Mon Sep 17 00:00:00 2001
+From: Aleksa Sarai
+Date: Wed, 11 Oct 2023 21:19:12 +1100
+Subject: [PATCH 5/7] SLE12: revert "apparmor: remove version-conditionals from
+ template"
+
+This reverts the following commits:
+
+ * 7008a514493a ("profiles/apparmor: remove version-conditional constraints (< 2.8.96)")
+ * 2e19a4d56bf2 ("contrib/apparmor: remove version-conditionals (< 2.9) from template")
+ * d169a5730649 ("contrib/apparmor: remove remaining version-conditionals (< 2.9) from template")
+ * ecaab085db4b ("profiles/apparmor: remove use of aaparser.GetVersion()")
+ * e3e715666f95 ("pkg/aaparser: deprecate GetVersion, as it's no longer used")
+
+These version conditionals are still required on SLE 12, where our
+apparmor_parser version is quite old.
+
+Signed-off-by: Aleksa Sarai
+---
+ contrib/apparmor/main.go      | 16 ++++++-
+ contrib/apparmor/template.go  | 16 +++++++
+ pkg/aaparser/aaparser.go      | 86 +++++++++++++++++++++++++++++++++++
+ profiles/apparmor/apparmor.go | 16 ++++++-
+ profiles/apparmor/template.go |  4 ++
+ 5 files changed, 134 insertions(+), 4 deletions(-)
+ create mode 100644 pkg/aaparser/aaparser.go
+
+diff --git a/contrib/apparmor/main.go b/contrib/apparmor/main.go
+index 899d8378edae..93f98cbd20e5 100644
+--- a/contrib/apparmor/main.go
++++ b/contrib/apparmor/main.go
+@@ -6,9 +6,13 @@ import (
+     "os"
+     "path"
+     "text/template"
++
++    "github.com/docker/docker/pkg/aaparser"
+ )
+ 
+-type profileData struct{}
++type profileData struct {
++    Version int
++}
+ 
+ func main() {
+     if len(os.Args) < 2 {
+@@ -18,6 +22,15 @@ func main() {
+     // parse the arg
+     apparmorProfilePath := os.Args[1]
+ 
++    version, err := aaparser.GetVersion()
++    if err != nil {
++        log.Fatal(err)
++    }
++    data := profileData{
++        Version: version,
++    }
++    fmt.Printf("apparmor_parser is of version %+v\n", data)
++
+     // parse the template
+     compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate)
+     if err != nil {
+@@ -35,7 +48,6 @@ func main() {
+     }
+     defer f.Close()
+ 
+-    data := profileData{}
+     if err := compiled.Execute(f, data); err != nil {
+         log.Fatalf("executing template failed: %v", err)
+     }
+diff --git a/contrib/apparmor/template.go b/contrib/apparmor/template.go
+index 58afcbe845ee..e6d0b6d37c58 100644
+--- a/contrib/apparmor/template.go
++++ b/contrib/apparmor/template.go
+@@ -20,9 +20,11 @@ profile /usr/bin/docker (attach_disconnected, complain) {
+ 
+   umount,
+   pivot_root,
++{{if ge .Version 209000}}
+   signal (receive) peer=@{profile_name},
+   signal (receive) peer=unconfined,
+   signal (send),
++{{end}}
+   network,
+   capability,
+   owner /** rw,
+@@ -45,10 +47,12 @@ profile /usr/bin/docker (attach_disconnected, complain) {
+   /etc/ld.so.cache r,
+   /etc/passwd r,
+ 
++{{if ge .Version 209000}}
+   ptrace peer=@{profile_name},
+   ptrace (read) peer=docker-default,
+   deny ptrace (trace) peer=docker-default,
+   deny ptrace peer=/usr/bin/docker///bin/ps,
++{{end}}
+ 
+   /usr/lib/** rm,
+   /lib/** rm,
+@@ -69,9 +73,11 @@ profile /usr/bin/docker (attach_disconnected, complain) {
+   /sbin/zfs rCx,
+   /sbin/apparmor_parser rCx,
+ 
++{{if ge .Version 209000}}
+   # Transitions
+   change_profile -> docker-*,
+   change_profile -> unconfined,
++{{end}}
+ 
+   profile /bin/cat (complain) {
+     /etc/ld.so.cache r,
+@@ -93,8 +99,10 @@ profile /usr/bin/docker (attach_disconnected, complain) {
+     /dev/null rw,
+     /bin/ps mr,
+ 
++{{if ge .Version 209000}}
+     # We don't need ptrace so we'll deny and ignore the error.
+     deny ptrace (read, trace),
++{{end}}
+ 
+     # Quiet dac_override denials
+     deny capability dac_override,
+@@ -112,11 +120,15 @@ profile /usr/bin/docker (attach_disconnected, complain) {
+     /proc/tty/drivers r,
+   }
+   profile /sbin/iptables (complain) {
++{{if ge .Version 209000}}
+     signal (receive) peer=/usr/bin/docker,
++{{end}}
+     capability net_admin,
+   }
+   profile /sbin/auplink flags=(attach_disconnected, complain) {
++{{if ge .Version 209000}}
+     signal (receive) peer=/usr/bin/docker,
++{{end}}
+     capability sys_admin,
+     capability dac_override,
+ 
+@@ -135,7 +147,9 @@ profile /usr/bin/docker (attach_disconnected, complain) {
+     /proc/[0-9]*/mounts rw,
+   }
+   profile /sbin/modprobe /bin/kmod (complain) {
++{{if ge .Version 209000}}
+     signal (receive) peer=/usr/bin/docker,
++{{end}}
+     capability sys_module,
+     /etc/ld.so.cache r,
+     /lib/** rm,
+@@ -149,7 +163,9 @@ profile /usr/bin/docker (attach_disconnected, complain) {
+   }
+   # xz works via pipes, so we do not need access to the filesystem.
+   profile /usr/bin/xz (complain) {
++{{if ge .Version 209000}}
+     signal (receive) peer=/usr/bin/docker,
++{{end}}
+     /etc/ld.so.cache r,
+     /lib/** rm,
+     /usr/bin/xz rm,
+diff --git a/pkg/aaparser/aaparser.go b/pkg/aaparser/aaparser.go
+new file mode 100644
+index 000000000000..89b48b2dba58
+--- /dev/null
++++ b/pkg/aaparser/aaparser.go
+@@ -0,0 +1,86 @@
++// Package aaparser is a convenience package interacting with `apparmor_parser`.
++package aaparser // import "github.com/docker/docker/pkg/aaparser"
++
++import (
++    "fmt"
++    "os/exec"
++    "strconv"
++    "strings"
++)
++
++const (
++    binary = "apparmor_parser"
++)
++
++// GetVersion returns the major and minor version of apparmor_parser.
++func GetVersion() (int, error) {
++    output, err := cmd("", "--version")
++    if err != nil {
++        return -1, err
++    }
++
++    return parseVersion(output)
++}
++
++// cmd runs `apparmor_parser` with the passed arguments.
++func cmd(dir string, arg ...string) (string, error) {
++    c := exec.Command(binary, arg...)
++    c.Dir = dir
++
++    output, err := c.CombinedOutput()
++    if err != nil {
++        return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err)
++    }
++
++    return string(output), nil
++}
++
++// parseVersion takes the output from `apparmor_parser --version` and returns
++// a representation of the {major, minor, patch} version as a single number of
++// the form MMmmPPP {major, minor, patch}.
++func parseVersion(output string) (int, error) {
++    // output is in the form of the following:
++    // AppArmor parser version 2.9.1
++    // Copyright (C) 1999-2008 Novell Inc.
++    // Copyright 2009-2012 Canonical Ltd.
++
++    lines := strings.SplitN(output, "\n", 2)
++    words := strings.Split(lines[0], " ")
++    version := words[len(words)-1]
++
++    // trim "-beta1" suffix from version="3.0.0-beta1" if exists
++    version = strings.SplitN(version, "-", 2)[0]
++    // also trim "~..." suffix used historically (https://gitlab.com/apparmor/apparmor/-/commit/bca67d3d27d219d11ce8c9cc70612bd637f88c10)
++    version = strings.SplitN(version, "~", 2)[0]
++
++    // split by major minor version
++    v := strings.Split(version, ".")
++    if len(v) == 0 || len(v) > 3 {
++        return -1, fmt.Errorf("parsing version failed for output: `%s`", output)
++    }
++
++    // Default the versions to 0.
++    var majorVersion, minorVersion, patchLevel int
++
++    majorVersion, err := strconv.Atoi(v[0])
++    if err != nil {
++        return -1, err
++    }
++
++    if len(v) > 1 {
++        minorVersion, err = strconv.Atoi(v[1])
++        if err != nil {
++            return -1, err
++        }
++    }
++    if len(v) > 2 {
++        patchLevel, err = strconv.Atoi(v[2])
++        if err != nil {
++            return -1, err
++        }
++    }
++
++    // major*10^5 + minor*10^3 + patch*10^0
++    numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel
++    return numericVersion, nil
++}
+diff --git a/profiles/apparmor/apparmor.go b/profiles/apparmor/apparmor.go
+index 277c853ebe1f..d1aad80cbfd2 100644
+--- a/profiles/apparmor/apparmor.go
++++ b/profiles/apparmor/apparmor.go
+@@ -11,10 +11,14 @@ import (
+     "path"
+     "strings"
+     "text/template"
++
++    "github.com/docker/docker/pkg/aaparser"
+ )
+ 
+-// profileDirectory is the file store for apparmor profiles and macros.
+-const profileDirectory = "/etc/apparmor.d"
++var (
++    // profileDirectory is the file store for apparmor profiles and macros.
++    profileDirectory = "/etc/apparmor.d"
++)
+ 
+ // profileData holds information about the given profile for generation.
+ type profileData struct {
+@@ -26,6 +30,8 @@ type profileData struct {
+     Imports []string
+     // InnerImports defines the apparmor functions to import in the profile.
+     InnerImports []string
++    // Version is the {major, minor, patch} version of apparmor_parser as a single number.
++    Version int
+ }
+ 
+ // generateDefault creates an apparmor profile from ProfileData.
+@@ -45,6 +51,12 @@ func (p *profileData) generateDefault(out io.Writer) error {
+         p.InnerImports = append(p.InnerImports, "#include ")
+     }
+ 
++    ver, err := aaparser.GetVersion()
++    if err != nil {
++        return err
++    }
++    p.Version = ver
++
+     return compiled.Execute(out, p)
+ }
+ 
+diff --git a/profiles/apparmor/template.go b/profiles/apparmor/template.go
+index 8dbc1b610288..2062aab1ac99 100644
+--- a/profiles/apparmor/template.go
++++ b/profiles/apparmor/template.go
+@@ -23,6 +23,7 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
+   capability,
+   file,
+   umount,
++{{if ge .Version 208096}}
+   # Host (privileged) processes may send signals to container processes.
+   signal (receive) peer=unconfined,
+   # runc may send signals to container processes (for "docker stop").
+@@ -33,6 +34,7 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
+   signal (receive) peer={{.DaemonProfile}},
+   # Container processes may send signals amongst themselves.
+   signal (send,receive) peer={{.Name}},
++{{end}}
+ 
+   deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir)
+   # deny write to files not in /proc//** or /proc/sys/**
+@@ -53,7 +55,9 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
+   deny /sys/devices/virtual/powercap/** rwklx,
+   deny /sys/kernel/security/** rwklx,
+ 
++{{if ge .Version 208095}}
+   # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container
+   ptrace (trace,read,tracedby,readby) peer={{.Name}},
++{{end}}
+ }
+ `
+--
+2.45.2
+
diff --git a/0006-bsc1221916-update-to-patched-buildkit-version-to-fix.patch b/0006-bsc1221916-update-to-patched-buildkit-version-to-fix.patch
new file mode 100644
index 0000000..0c86198
--- /dev/null
+++ b/0006-bsc1221916-update-to-patched-buildkit-version-to-fix.patch
@@ -0,0 +1,890 @@
+From dd16d113b9215bf5b0b56c409e7272ce07525836 Mon Sep 17 00:00:00 2001
+From: Aleksa Sarai
+Date: Tue, 7 May 2024 01:51:25 +1000
+Subject: [PATCH 6/7] bsc1221916: update to patched buildkit version to fix
+ symlink resolution
+
+SUSE-Bugs: https://bugzilla.suse.com/show_bug.cgi?id=1221916
+Signed-off-by: Aleksa Sarai
+---
+ vendor.mod                                    |   2 +
+ vendor.sum                                    |   4 +-
+ .../buildkit/cache/contenthash/checksum.go    | 393 ++++++++++--------
+ .../moby/buildkit/cache/contenthash/path.go   | 161 +++----
+ vendor/modules.txt                            |   3 +-
+ 5 files changed, 314 insertions(+), 249 deletions(-)
+
+diff --git a/vendor.mod b/vendor.mod
+index d69d2aa9f87f..5c42a653b91b 100644
+--- a/vendor.mod
++++ b/vendor.mod
+@@ -114,6 +114,8 @@ require (
+     tags.cncf.io/container-device-interface v0.7.2
+ )
+ 
++replace github.com/moby/buildkit => github.com/cyphar/buildkit v0.0.0-20240624075140-0db2d2345b94
++
+ require (
+     cloud.google.com/go v0.110.8 // indirect
+     cloud.google.com/go/compute v1.23.1 // indirect
+diff --git a/vendor.sum b/vendor.sum
+index 7a5bd6b4077b..f2aba7f8d3eb 100644
+--- a/vendor.sum
++++ b/vendor.sum
+@@ -199,6 +199,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
+ github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+ github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+ github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
++github.com/cyphar/buildkit v0.0.0-20240624075140-0db2d2345b94 h1:xBwPT+ap0LDYsQJh1VKm9NNEKF5A7e/P3TRjnbTqZUE=
++github.com/cyphar/buildkit v0.0.0-20240624075140-0db2d2345b94/go.mod h1:2cyVOv9NoHM7arphK9ZfHIWKn9YVZRFd1wXB8kKmEzY=
+ github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+ github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+@@ -480,8 +482,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
+ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+ github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+ github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
+-github.com/moby/buildkit v0.13.2 h1:nXNszM4qD9E7QtG7bFWPnDI1teUQFQglBzon/IU3SzI=
+-github.com/moby/buildkit v0.13.2/go.mod h1:2cyVOv9NoHM7arphK9ZfHIWKn9YVZRFd1wXB8kKmEzY=
+ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+ github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
+ github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ=
+diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go
+index e0f58d57b3db..ec649f69b5e0 100644
+--- a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go
++++ b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go
+@@ -10,6 +10,7 @@ import (
+     "path/filepath"
+     "strings"
+     "sync"
++    "sync/atomic"
+ 
+     iradix "github.com/hashicorp/go-immutable-radix"
+     "github.com/hashicorp/golang-lru/simplelru"
+@@ -290,7 +291,7 @@ func keyPath(p string) string {
+ // HandleChange notifies the source about a modification operation
+ func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) {
+     p = keyPath(p)
+-    k := convertPathToKey([]byte(p))
++    k := convertPathToKey(p)
+ 
+     deleteDir := func(cr *CacheRecord) {
+         if cr.Type == CacheRecordTypeDir {
+@@ -369,7 +370,7 @@ func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.Fil
+     // note that the source may be called later because data writing is async
+     if fi.Mode()&os.ModeSymlink == 0 && stat.Linkname != "" {
+         ln := path.Join("/", filepath.ToSlash(stat.Linkname))
+-        v, ok := cc.txn.Get(convertPathToKey([]byte(ln)))
++        v, ok := cc.txn.Get(convertPathToKey(ln))
+         if ok {
+             cp := *v.(*CacheRecord)
+             cr = &cp
+@@ -407,7 +408,7 @@ func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable,
+     defer m.clean()
+ 
+     if !opts.Wildcard && len(opts.IncludePatterns) == 0 && len(opts.ExcludePatterns) == 0 {
+-        return cc.checksumFollow(ctx, m, p, opts.FollowLinks)
++        return cc.lazyChecksum(ctx, m, p, opts.FollowLinks)
+     }
+ 
+     includedPaths, err := cc.includedPaths(ctx, m, p, opts)
+@@ -418,7 +419,7 @@ func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable,
+     if opts.FollowLinks {
+         for i, w := range includedPaths {
+             if w.record.Type == CacheRecordTypeSymlink {
+-                dgst, err := cc.checksumFollow(ctx, m, w.path, opts.FollowLinks)
++                dgst, err := cc.lazyChecksum(ctx, m, w.path, opts.FollowLinks)
+                 if err != nil {
+                     return "", err
+                 }
+@@ -445,30 +446,6 @@ func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable,
+     return digester.Digest(), nil
+ }
+ 
+-func (cc *cacheContext) checksumFollow(ctx context.Context, m *mount, p string, follow bool) (digest.Digest, error) {
+-    const maxSymlinkLimit = 255
+-    i := 0
+-    for {
+-        if i > maxSymlinkLimit {
+-            return "", errors.Errorf("too many symlinks: %s", p)
+-        }
+-        cr, err := cc.checksumNoFollow(ctx, m, p)
+-        if err != nil {
+-            return "", err
+-        }
+-        if cr.Type == CacheRecordTypeSymlink && follow {
+-            link := cr.Linkname
+-            if !path.IsAbs(cr.Linkname) {
+-                link = path.Join(path.Dir(p), link)
+-            }
+-            i++
+-            p = link
+-        } else {
+-            return cr.Digest, nil
+-        }
+-    }
+-}
+-
+ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, opts ChecksumOpts) ([]*includedPath, error) {
+     cc.mu.Lock()
+     defer cc.mu.Unlock()
+@@ -478,12 +455,12 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
+     }
+ 
+     root := cc.tree.Root()
+-    scan, err := cc.needsScan(root, "")
++    scan, err := cc.needsScan(root, "", false)
+     if err != nil {
+         return nil, err
+     }
+     if scan {
+-        if err := cc.scanPath(ctx, m, ""); err != nil {
++        if err := cc.scanPath(ctx, m, "", false); err != nil {
+             return nil, err
+         }
+     }
+@@ -536,13 +513,13 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
+         }
+     } else {
+         origPrefix = p
+-        k = convertPathToKey([]byte(origPrefix))
++        k = convertPathToKey(origPrefix)
+ 
+         // We need to resolve symlinks here, in case the base path
+         // involves a symlink. That will match fsutil behavior of
+         // calling functions such as stat and walk.
+         var cr *CacheRecord
+-        k, cr, err = getFollowLinks(root, k, true)
++        k, cr, err = getFollowLinks(root, k, false)
+         if err != nil {
+             return nil, err
+         }
+@@ -554,7 +531,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
+         iter.SeekLowerBound(append(append([]byte{}, k...), 0))
+     }
+ 
+-    resolvedPrefix = string(convertKeyToPath(k))
++    resolvedPrefix = convertKeyToPath(k)
+     } else {
+         k, _, keyOk = iter.Next()
+     }
+@@ -565,7 +542,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
+     )
+ 
+     for keyOk {
+-        fn := string(convertKeyToPath(k))
++        fn := convertKeyToPath(k)
+ 
+         // Convert the path prefix from what we found in the prefix
+         // tree to what the argument specified.
+@@ -751,36 +728,12 @@ func wildcardPrefix(root *iradix.Node, p string) (string, []byte, bool, error) {
+         return "", nil, false, nil
+     }
+ 
+-    linksWalked := 0
+-    k, cr, err := getFollowLinksWalk(root, convertPathToKey([]byte(d1)), true, &linksWalked)
++    // Only resolve the final symlink component if there are components in the
++    // wildcard segment.
++    k, cr, err := getFollowLinks(root, convertPathToKey(d1), d2 != "")
+     if err != nil {
+         return "", k, false, err
+     }
+-
+-    if d2 != "" && cr != nil && cr.Type == CacheRecordTypeSymlink {
+-        // getFollowLinks only handles symlinks in path
+-        // components before the last component, so
+-        // handle last component in d1 specially.
+-        resolved := string(convertKeyToPath(k))
+-        for {
+-            v, ok := root.Get(k)
+-
+-            if !ok {
+-                return d1, k, false, nil
+-            }
+-            if v.(*CacheRecord).Type != CacheRecordTypeSymlink {
+-                break
+-            }
+-
+-            linksWalked++
+-            if linksWalked > 255 {
+-                return "", k, false, errors.Errorf("too many links")
+-            }
+-
+-            resolved := cleanLink(resolved, v.(*CacheRecord).Linkname)
+-            k = convertPathToKey([]byte(resolved))
+-        }
+-    }
+     return d1, k, cr != nil, nil
+ }
+ 
+@@ -816,19 +769,22 @@ func containsWildcards(name string) bool {
+     return false
+ }
+ 
+-func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
++func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string, followTrailing bool) (digest.Digest, error) {
+     p = keyPath(p)
++    k := convertPathToKey(p)
+ 
++    // Try to look up the path directly without doing a scan.
+     cc.mu.RLock()
+     if cc.txn == nil {
+         root := cc.tree.Root()
+         cc.mu.RUnlock()
+-        v, ok := root.Get(convertPathToKey([]byte(p)))
+-        if ok {
+-            cr := v.(*CacheRecord)
+-            if cr.Digest != "" {
+-                return cr, nil
+-            }
++
++        _, cr, err := getFollowLinks(root, k, followTrailing)
++        if err != nil {
++            return "", err
++        }
++        if cr != nil && cr.Digest != "" {
++            return cr.Digest, nil
+         }
+     } else {
+         cc.mu.RUnlock()
+@@ -848,7 +804,11 @@ func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string
+         }
+     }()
+ 
+-    return cc.lazyChecksum(ctx, m, p)
++    cr, err := cc.scanChecksum(ctx, m, p, followTrailing)
++    if err != nil {
++        return "", err
++    }
++    return cr.Digest, nil
+ }
+ 
+ func (cc *cacheContext) commitActiveTransaction() {
+@@ -856,7 +816,7 @@ func (cc *cacheContext) commitActiveTransaction() {
+         addParentToMap(d, cc.dirtyMap)
+     }
+     for d := range cc.dirtyMap {
+-        k := convertPathToKey([]byte(d))
++        k := convertPathToKey(d)
+         if _, ok := cc.txn.Get(k); ok {
+             cc.txn.Insert(k, &CacheRecord{Type: CacheRecordTypeDir})
+         }
+@@ -867,21 +827,21 @@
+     cc.txn = nil
+ }
+ 
+-func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
++func (cc *cacheContext) scanChecksum(ctx context.Context, m *mount, p string, followTrailing bool) (*CacheRecord, error) {
+     root := cc.tree.Root()
+-    scan, err := cc.needsScan(root, p)
++    scan, err := cc.needsScan(root, p, followTrailing)
+     if err != nil {
+         return nil, err
+     }
+     if scan {
+-        if err := cc.scanPath(ctx, m, p); err != nil {
++        if err := cc.scanPath(ctx, m, p, followTrailing); err != nil {
+             return nil, err
+         }
+     }
+-    k := convertPathToKey([]byte(p))
++    k := convertPathToKey(p)
+     txn := cc.tree.Txn()
+     root = txn.Root()
+-    cr, updated, err := cc.checksum(ctx, root, txn, m, k, true)
++    cr, updated, err := cc.checksum(ctx, root, txn, m, k, followTrailing)
+     if err != nil {
+         return nil, err
+     }
+@@ -890,9 +850,9 @@ func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*
+     return cr, err
+ }
+ 
+-func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte, follow bool) (*CacheRecord, bool, error) {
++func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte, followTrailing bool) (*CacheRecord, bool, error) {
+     origk := k
+-    k, cr, err := getFollowLinks(root, k, follow)
++    k, cr, err := getFollowLinks(root, k, followTrailing)
+     if err != nil {
+         return nil, false, err
+     }
+@@ -918,7 +878,9 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
+         }
+         h.Write(bytes.TrimPrefix(subk, k))
+ 
+-        subcr, _, err := cc.checksum(ctx, root, txn, m, subk, true)
++        // We do not follow trailing links when checksumming a directory's
++        // contents.
++        subcr, _, err := cc.checksum(ctx, root, txn, m, subk, false)
+         if err != nil {
+             return nil, false, err
+         }
+@@ -935,7 +897,7 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
+         dgst = digest.NewDigest(digest.SHA256, h)
+ 
+     default:
+-        p := string(convertKeyToPath(bytes.TrimSuffix(k, []byte{0})))
++        p := convertKeyToPath(bytes.TrimSuffix(k, []byte{0}))
+ 
+         target, err := m.mount(ctx)
+         if err != nil {
+@@ -967,42 +929,82 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
+     return cr2, true, nil
+ }
+ 
+-// needsScan returns false if path is in the tree or a parent path is in tree
+-// and subpath is missing
+-func (cc *cacheContext) needsScan(root *iradix.Node, p string) (bool, error) {
+-    var linksWalked int
+-    return cc.needsScanFollow(root, p, &linksWalked)
++// pathSet is a set of path prefixes that can be used to see if a given path is
++// lexically a child of any path in the set. All paths provided to this set
++// MUST be absolute and use / as the separator.
++type pathSet struct {
++    // prefixes contains paths of the form "/a/b/", so that we correctly detect
++    // /a/b as being a parent of /a/b/c but not /a/bc.
++    prefixes []string
+ }
+ 
+-func (cc *cacheContext) needsScanFollow(root *iradix.Node, p string, linksWalked *int) (bool, error) {
+-    if p == "/" {
+-        p = ""
+-    }
+-    v, ok := root.Get(convertPathToKey([]byte(p)))
+-    if !ok {
+-        if p == "" {
+-            return true, nil
++// add a path to the set. This is a no-op if includes(path) == true.
++func (s *pathSet) add(p string) {
++    // Ensure the path is absolute and clean.
++    p = path.Join("/", p)
++    if !s.includes(p) {
++        if p != "/" {
++            p += "/"
+         }
+-        return cc.needsScanFollow(root, path.Clean(path.Dir(p)), linksWalked)
++        s.prefixes = append(s.prefixes, p)
++    }
++}
++
++// includes returns true iff there is a path in the pathSet which is a lexical
++// parent of the given path. The provided path MUST be an absolute path and
++// MUST NOT contain any ".." components, as they will be path.Clean'd.
++func (s pathSet) includes(p string) bool {
++    // Ensure the path is absolute and clean.
++    p = path.Join("/", p)
++    if p != "/" {
++        p += "/"
+     }
+-    cr := v.(*CacheRecord)
+-    if cr.Type == CacheRecordTypeSymlink {
+-        if *linksWalked > 255 {
+-            return false, errTooManyLinks
++    for _, prefix := range s.prefixes {
++        if strings.HasPrefix(p, prefix) {
++            return true
+         }
+-        *linksWalked++
+-        link := path.Clean(cr.Linkname)
+-        if !path.IsAbs(cr.Linkname) {
+-            link = path.Join("/", path.Dir(p), link)
++    }
++    return false
++}
++
++// needsScan returns false if path is in the tree or a parent path is in tree
++// and subpath is missing.
++func (cc *cacheContext) needsScan(root *iradix.Node, path string, followTrailing bool) (bool, error) {
++    var (
++        goodPaths       pathSet
++        hasParentInTree bool
++    )
++    k := convertPathToKey(path)
++    _, cr, err := getFollowLinksCallback(root, k, followTrailing, func(subpath string, cr *CacheRecord) error {
++        // If we found a path that exists in the cache, add it to the set of
++        // known-scanned paths. Otherwise, verify whether the not-found subpath
++        // is inside a known-scanned path (we might have hit a "..", taking us
++        // out of the scanned paths, or we might hit a non-existent path inside
++        // a scanned path). getFollowLinksCallback iterates left-to-right, so
++        // we will always hit ancestors first.
++ if cr != nil { ++ hasParentInTree = cr.Type != CacheRecordTypeSymlink ++ goodPaths.add(subpath) ++ } else { ++ hasParentInTree = goodPaths.includes(subpath) + } +- return cc.needsScanFollow(root, link, linksWalked) ++ return nil ++ }) ++ if err != nil { ++ return false, err + } +- return false, nil ++ return cr == nil && !hasParentInTree, nil + } + +-func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retErr error) { ++// Only used by TestNeedScanChecksumRegression to make sure scanPath is not ++// called for paths we have already scanned. ++var ( ++ scanCounterEnable bool ++ scanCounter atomic.Uint64 ++) ++ ++func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string, followTrailing bool) (retErr error) { + p = path.Join("/", p) +- d, _ := path.Split(p) + + mp, err := m.mount(ctx) + if err != nil { +@@ -1012,33 +1014,42 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr + n := cc.tree.Root() + txn := cc.tree.Txn() + +- parentPath, err := rootPath(mp, filepath.FromSlash(d), func(p, link string) error { ++ resolvedPath, err := rootPath(mp, filepath.FromSlash(p), followTrailing, func(p, link string) error { + cr := &CacheRecord{ + Type: CacheRecordTypeSymlink, + Linkname: filepath.ToSlash(link), + } +- k := []byte(path.Join("/", filepath.ToSlash(p))) +- k = convertPathToKey(k) +- txn.Insert(k, cr) ++ p = path.Join("/", filepath.ToSlash(p)) ++ txn.Insert(convertPathToKey(p), cr) + return nil + }) + if err != nil { + return err + } + +- err = filepath.Walk(parentPath, func(itemPath string, fi os.FileInfo, err error) error { ++ // Scan the parent directory of the path we resolved, unless we're at the ++ // root (in which case we scan the root). ++ scanPath := filepath.Dir(resolvedPath) ++ if !strings.HasPrefix(filepath.ToSlash(scanPath)+"/", filepath.ToSlash(mp)+"/") { ++ scanPath = resolvedPath ++ } ++ ++ err = filepath.Walk(scanPath, func(itemPath string, fi os.FileInfo, err error) error { ++ if scanCounterEnable { ++ scanCounter.Add(1) ++ } + if err != nil { ++ // If the root doesn't exist, ignore the error. ++ if itemPath == scanPath && errors.Is(err, os.ErrNotExist) { ++ return nil ++ } + return errors.Wrapf(err, "failed to walk %s", itemPath) + } + rel, err := filepath.Rel(mp, itemPath) + if err != nil { + return err + } +- k := []byte(path.Join("/", filepath.ToSlash(rel))) +- if string(k) == "/" { +- k = []byte{} +- } +- k = convertPathToKey(k) ++ k := convertPathToKey(keyPath(rel)) + if _, ok := n.Get(k); !ok { + cr := &CacheRecord{ + Type: CacheRecordTypeFile, +@@ -1071,55 +1082,118 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr + return nil + } + +-func getFollowLinks(root *iradix.Node, k []byte, follow bool) ([]byte, *CacheRecord, error) { +- var linksWalked int +- return getFollowLinksWalk(root, k, follow, &linksWalked) ++// followLinksCallback is called after we try to resolve each element. If the ++// path was not found, cr is nil. ++type followLinksCallback func(path string, cr *CacheRecord) error ++ ++// getFollowLinks is shorthand for getFollowLinksCallback(..., nil). ++func getFollowLinks(root *iradix.Node, k []byte, followTrailing bool) ([]byte, *CacheRecord, error) { ++ return getFollowLinksCallback(root, k, followTrailing, nil) + } + +-func getFollowLinksWalk(root *iradix.Node, k []byte, follow bool, linksWalked *int) ([]byte, *CacheRecord, error) { ++// getFollowLinksCallback looks up the requested key, fully resolving any ++// symlink components encountered. 
The implementation is heavily based on
++// .
++//
++// followTrailing indicates whether the *final component* of the path should be
++// resolved (effectively O_PATH|O_NOFOLLOW). Note that (in contrast to some
++// Linux APIs), followTrailing is obeyed even if the key has a trailing slash
++// (though paths like "foo/link/." will cause the link to be resolved).
++//
++// cb is a callback that is called for each path component encountered during
++// path resolution (after the path component is looked up in the cache). This
++// means for a path like /a/b/c, the callback will be called for at least
++//
++// {/, /a, /a/b, /a/b/c}
++//
++// Note that if any of the components are symlinks, the paths will depend on
++// the symlink contents and there will be more callbacks. If the requested key
++// has a trailing slash, the callback will also be called for the final
++// trailing-slash lookup (/a/b/c/ in the above example). Note that
++// getFollowLinksCallback will try to look up the original key directly first
++// and the callback is not called for this first lookup.
++func getFollowLinksCallback(root *iradix.Node, k []byte, followTrailing bool, cb followLinksCallback) ([]byte, *CacheRecord, error) {
+ v, ok := root.Get(k)
+- if ok {
++ if ok && (!followTrailing || v.(*CacheRecord).Type != CacheRecordTypeSymlink) {
+ return k, v.(*CacheRecord), nil
+ }
+- if !follow || len(k) == 0 {
++ if len(k) == 0 {
+ return k, nil, nil
+ }
+
+- dir, file := splitKey(k)
++ var (
++ currentPath = "/"
++ remainingPath = convertKeyToPath(k)
++ linksWalked int
++ cr *CacheRecord
++ )
++ // Trailing slashes are significant for the cache, but path.Clean strips
++ // them. We only care about the slash for the final lookup.
++ remainingPath, hadTrailingSlash := strings.CutSuffix(remainingPath, "/")
++ for remainingPath != "" {
++ // Get next component.
++ var part string
++ if i := strings.IndexRune(remainingPath, '/'); i == -1 {
++ part, remainingPath = remainingPath, ""
++ } else {
++ part, remainingPath = remainingPath[:i], remainingPath[i+1:]
++ }
+
+- k, parent, err := getFollowLinksWalk(root, dir, follow, linksWalked)
+- if err != nil {
+- return nil, nil, err
+- }
+- if parent != nil {
+- if parent.Type == CacheRecordTypeSymlink {
+- *linksWalked++
+- if *linksWalked > 255 {
+- return nil, nil, errors.Errorf("too many links")
++ // Apply the component to the path. Since it is a single component, and
++ // our current path contains no symlinks, we can just apply it
++ // lexically.
++ nextPath := keyPath(path.Join("/", currentPath, part))
++ // In contrast to rootPath, we don't skip lookups for no-op components
++ // or / because we need to call the callback for every path component
++ // we hit (including /) and we need to make sure that the CacheRecord
++ // we return is correct after every iteration.
++
++ cr = nil
++ v, ok := root.Get(convertPathToKey(nextPath))
++ if ok {
++ cr = v.(*CacheRecord)
++ }
++ if cb != nil {
++ if err := cb(nextPath, cr); err != nil {
++ return nil, nil, err
+ }
++ }
++ if !ok || cr.Type != CacheRecordTypeSymlink {
++ currentPath = nextPath
++ continue
++ }
++ if !followTrailing && remainingPath == "" {
++ currentPath = nextPath
++ break
++ }
+
+- link := cleanLink(string(convertKeyToPath(dir)), parent.Linkname)
+- return getFollowLinksWalk(root, append(convertPathToKey([]byte(link)), file...), follow, linksWalked)
++ linksWalked++
++ if linksWalked > maxSymlinkLimit {
++ return nil, nil, errTooManyLinks
+ }
+- }
+- k = append(k, file...)
+- v, ok = root.Get(k) +- if ok { +- return k, v.(*CacheRecord), nil +- } +- return k, nil, nil +-} + +-func cleanLink(dir, linkname string) string { +- dirPath := path.Clean(dir) +- if dirPath == "." || dirPath == "/" { +- dirPath = "" ++ remainingPath = cr.Linkname + "/" + remainingPath ++ if path.IsAbs(cr.Linkname) { ++ currentPath = "/" ++ } + } +- link := path.Clean(linkname) +- if !path.IsAbs(link) { +- return path.Join("/", path.Join(path.Dir(dirPath), link)) ++ // We've already looked up the final component. However, if there was a ++ // trailing slash in the original path, we need to do the lookup again with ++ // the slash applied. ++ if hadTrailingSlash { ++ cr = nil ++ currentPath += "/" ++ v, ok := root.Get(convertPathToKey(currentPath)) ++ if ok { ++ cr = v.(*CacheRecord) ++ } ++ if cb != nil { ++ if err := cb(currentPath, cr); err != nil { ++ return nil, nil, err ++ } ++ } + } +- return link ++ return convertPathToKey(currentPath), cr, nil + } + + func prepareDigest(fp, p string, fi os.FileInfo) (digest.Digest, error) { +@@ -1176,25 +1250,10 @@ func poolsCopy(dst io.Writer, src io.Reader) (written int64, err error) { + return + } + +-func convertPathToKey(p []byte) []byte { ++func convertPathToKey(p string) []byte { + return bytes.Replace([]byte(p), []byte("/"), []byte{0}, -1) + } + +-func convertKeyToPath(p []byte) []byte { +- return bytes.Replace([]byte(p), []byte{0}, []byte("/"), -1) +-} +- +-func splitKey(k []byte) ([]byte, []byte) { +- foundBytes := false +- i := len(k) - 1 +- for { +- if i <= 0 || foundBytes && k[i] == 0 { +- break +- } +- if k[i] != 0 { +- foundBytes = true +- } +- i-- +- } +- return append([]byte{}, k[:i]...), k[i:] ++func convertKeyToPath(p []byte) string { ++ return string(bytes.Replace(p, []byte{0}, []byte("/"), -1)) + } +diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/path.go b/vendor/github.com/moby/buildkit/cache/contenthash/path.go +index 42b7fd8349c7..ae950f713241 100644 +--- a/vendor/github.com/moby/buildkit/cache/contenthash/path.go ++++ b/vendor/github.com/moby/buildkit/cache/contenthash/path.go +@@ -1,108 +1,111 @@ ++// This code mostly comes from . ++ ++// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. ++// Copyright (C) 2017-2024 SUSE LLC. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ + package contenthash + + import ( + "os" + "path/filepath" ++ "strings" + + "github.com/pkg/errors" + ) + +-var ( +- errTooManyLinks = errors.New("too many links") +-) ++var errTooManyLinks = errors.New("too many links") ++ ++const maxSymlinkLimit = 255 + + type onSymlinkFunc func(string, string) error + +-// rootPath joins a path with a root, evaluating and bounding any +-// symlink to the root directory. +-// This is containerd/continuity/fs RootPath implementation with a callback on +-// resolving the symlink. +-func rootPath(root, path string, cb onSymlinkFunc) (string, error) { +- if path == "" { ++// rootPath joins a path with a root, evaluating and bounding any symlink to ++// the root directory. This is a slightly modified version of SecureJoin from ++// github.com/cyphar/filepath-securejoin, with a callback which we call after ++// each symlink resolution. 
++func rootPath(root, unsafePath string, followTrailing bool, cb onSymlinkFunc) (string, error) { ++ if unsafePath == "" { + return root, nil + } +- var linksWalked int // to protect against cycles +- for { +- i := linksWalked +- newpath, err := walkLinks(root, path, &linksWalked, cb) +- if err != nil { +- return "", err +- } +- path = newpath +- if i == linksWalked { +- newpath = filepath.Join("/", newpath) +- if path == newpath { +- return filepath.Join(root, newpath), nil +- } +- path = newpath +- } +- } +-} + +-func walkLink(root, path string, linksWalked *int, cb onSymlinkFunc) (newpath string, islink bool, err error) { +- if *linksWalked > 255 { +- return "", false, errTooManyLinks +- } ++ unsafePath = filepath.FromSlash(unsafePath) ++ var ( ++ currentPath string ++ linksWalked int ++ ) ++ for unsafePath != "" { ++ // Windows-specific: remove any drive letters from the path. ++ if v := filepath.VolumeName(unsafePath); v != "" { ++ unsafePath = unsafePath[len(v):] ++ } + +- path = filepath.Join("/", path) +- if path == "/" { +- return path, false, nil +- } +- realPath := filepath.Join(root, path) ++ // Remove any unnecessary trailing slashes. ++ unsafePath = strings.TrimSuffix(unsafePath, string(filepath.Separator)) + +- fi, err := os.Lstat(realPath) +- if err != nil { +- // If path does not yet exist, treat as non-symlink +- if errors.Is(err, os.ErrNotExist) { +- return path, false, nil ++ // Get the next path component. ++ var part string ++ if i := strings.IndexRune(unsafePath, filepath.Separator); i == -1 { ++ part, unsafePath = unsafePath, "" ++ } else { ++ part, unsafePath = unsafePath[:i], unsafePath[i+1:] + } +- return "", false, err +- } +- if fi.Mode()&os.ModeSymlink == 0 { +- return path, false, nil +- } +- newpath, err = os.Readlink(realPath) +- if err != nil { +- return "", false, err +- } +- if cb != nil { +- if err := cb(path, newpath); err != nil { +- return "", false, err +- } +- } +- *linksWalked++ +- return newpath, true, nil +-} + +-func walkLinks(root, path string, linksWalked *int, cb onSymlinkFunc) (string, error) { +- switch dir, file := filepath.Split(path); { +- case dir == "": +- newpath, _, err := walkLink(root, file, linksWalked, cb) +- return newpath, err +- case file == "": +- if os.IsPathSeparator(dir[len(dir)-1]) { +- if dir == "/" { +- return dir, nil +- } +- return walkLinks(root, dir[:len(dir)-1], linksWalked, cb) ++ // Apply the component lexically to the path we are building. path does ++ // not contain any symlinks, and we are lexically dealing with a single ++ // component, so it's okay to do filepath.Clean here. ++ nextPath := filepath.Join(string(filepath.Separator), currentPath, part) ++ if nextPath == string(filepath.Separator) { ++ // If we end up back at the root, we don't need to re-evaluate /. ++ currentPath = "" ++ continue + } +- newpath, _, err := walkLink(root, dir, linksWalked, cb) +- return newpath, err +- default: +- newdir, err := walkLinks(root, dir, linksWalked, cb) +- if err != nil { ++ fullPath := root + string(filepath.Separator) + nextPath ++ ++ // Figure out whether the path is a symlink. ++ fi, err := os.Lstat(fullPath) ++ if err != nil && !errors.Is(err, os.ErrNotExist) { + return "", err + } +- newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked, cb) ++ // Treat non-existent path components the same as non-symlinks (we ++ // can't do any better here). 
++ if errors.Is(err, os.ErrNotExist) || fi.Mode()&os.ModeSymlink == 0 {
++ currentPath = nextPath
++ continue
++ }
++ // Don't resolve the final component with !followTrailing.
++ if !followTrailing && unsafePath == "" {
++ currentPath = nextPath
++ break
++ }
++
++ // It's a symlink, so get its contents and expand it by prepending it
++ // to the yet-unparsed path.
++ linksWalked++
++ if linksWalked > maxSymlinkLimit {
++ return "", errTooManyLinks
++ }
++
++ dest, err := os.Readlink(fullPath)
+ if err != nil {
+ return "", err
+ }
+- if !islink {
+- return newpath, nil
++ if cb != nil {
++ if err := cb(nextPath, dest); err != nil {
++ return "", err
++ }
+ }
+- if filepath.IsAbs(newpath) {
+- return newpath, nil
++
++ unsafePath = dest + string(filepath.Separator) + unsafePath
++ // Absolute symlinks reset any work we've already done.
++ if filepath.IsAbs(dest) {
++ currentPath = ""
+ }
+- return filepath.Join(newdir, newpath), nil
+ }
++
++ // There should be no lexical components left in path here, but just for
++ // safety do a filepath.Clean before the join.
++ finalPath := filepath.Join(string(filepath.Separator), currentPath)
++ return filepath.Join(root, finalPath), nil
+ }
+diff --git a/vendor/modules.txt b/vendor/modules.txt
+index 7f3e6497785d..247f49f3518e 100644
+--- a/vendor/modules.txt
++++ b/vendor/modules.txt
+@@ -711,7 +711,7 @@ github.com/mitchellh/hashstructure/v2
+ # github.com/mitchellh/reflectwalk v1.0.2
+ ## explicit
+ github.com/mitchellh/reflectwalk
+-# github.com/moby/buildkit v0.13.2
++# github.com/moby/buildkit v0.13.2 => github.com/cyphar/buildkit v0.0.0-20240624075140-0db2d2345b94
+ ## explicit; go 1.21
+ github.com/moby/buildkit/api/services/control
+ github.com/moby/buildkit/api/types
+@@ -1610,3 +1610,4 @@ tags.cncf.io/container-device-interface/pkg/parser
+ # tags.cncf.io/container-device-interface/specs-go v0.7.0
+ ## explicit; go 1.19
+ tags.cncf.io/container-device-interface/specs-go
++# github.com/moby/buildkit => github.com/cyphar/buildkit v0.0.0-20240624075140-0db2d2345b94
+--
+2.45.2
+
diff --git a/0007-bsc1214855-volume-use-AtomicWriteFile-to-save-volume.patch b/0007-bsc1214855-volume-use-AtomicWriteFile-to-save-volume.patch
new file mode 100644
index 0000000..e996b3c
--- /dev/null
+++ b/0007-bsc1214855-volume-use-AtomicWriteFile-to-save-volume.patch
@@ -0,0 +1,53 @@
+From 62035ba22a45bde6bed2da321e7ad954f5b461b4 Mon Sep 17 00:00:00 2001
+From: Aleksa Sarai
+Date: Wed, 19 Jun 2024 16:30:49 +1000
+Subject: [PATCH 7/7] bsc1214855: volume: use AtomicWriteFile to save volume
+ options
+
+If the system (or Docker) crashes while saving the volume options, on
+restart the daemon will error out when trying to read the options file
+because it doesn't contain valid JSON.
+
+In such a crash scenario, the new volume will be treated as though it
+has the default options configuration. This is not ideal, but volumes
+created on very old Docker versions (pre-1.11[1], circa 2016) do not
+have opts.json and so doing some kind of cleanup when loading the volume
+store (even if we take care to only delete empty volumes) could delete
+existing volumes carried over from very old Docker versions that users
+would not expect to disappear.
+
+Ultimately, if a user creates a volume and the system crashes, a volume
+that has the wrong config is better than Docker not being able to start.
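For background, the crash-safety technique relied on here is the standard write-to-a-temporary-file-then-rename pattern: the rename is atomic, so a crash can only ever leave behind either the old file or the complete new file. Below is a minimal Go sketch of that pattern; it assumes nothing about the actual `ioutils.AtomicWriteFile` internals beyond the behaviour described above, and the helper name is illustrative:

```go
package main

import (
	"os"
	"path/filepath"
)

// atomicWriteFile sketches the temp-file-plus-rename pattern: write the new
// contents to a temporary file in the same directory, flush them to disk,
// then rename over the target. A crash mid-write leaves the old file intact
// rather than a truncated, half-written one.
func atomicWriteFile(filename string, data []byte, perm os.FileMode) error {
	tmp, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // harmless after a successful rename
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Sync(); err != nil { // make sure the data reaches the disk
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	if err := os.Chmod(tmp.Name(), perm); err != nil {
		return err
	}
	// Rename is atomic on POSIX filesystems, which is what provides the
	// all-or-nothing guarantee.
	return os.Rename(tmp.Name(), filename)
}
```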
+
+[1]: commit b05b2370757d ("Support mount opts for `local` volume driver")
+
+SUSE-Bugs: https://bugzilla.suse.com/show_bug.cgi?id=1214855
+Signed-off-by: Aleksa Sarai
+---
+ volume/local/local.go | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/volume/local/local.go b/volume/local/local.go
+index 6e96aeea4189..4412f34a3da9 100644
+--- a/volume/local/local.go
++++ b/volume/local/local.go
+@@ -17,6 +17,7 @@ import (
+ "github.com/docker/docker/daemon/names"
+ "github.com/docker/docker/errdefs"
+ "github.com/docker/docker/pkg/idtools"
++ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/quota"
+ "github.com/docker/docker/volume"
+ "github.com/pkg/errors"
+@@ -388,7 +389,7 @@ func (v *localVolume) saveOpts() error {
+ if err != nil {
+ return err
+ }
+- err = os.WriteFile(filepath.Join(v.rootPath, "opts.json"), b, 0o600)
++ err = ioutils.AtomicWriteFile(filepath.Join(v.rootPath, "opts.json"), b, 0o600)
+ if err != nil {
+ return errdefs.System(errors.Wrap(err, "error while persisting volume options"))
+ }
+--
+2.45.2
+
diff --git a/80-docker.rules b/80-docker.rules
new file mode 100644
index 0000000..a3dac1c
--- /dev/null
+++ b/80-docker.rules
@@ -0,0 +1,5 @@
+# hide docker's loopback devices from udisks, and thus from user desktops
+SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
+SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
+
+
diff --git a/README_SUSE.md b/README_SUSE.md
new file mode 100644
index 0000000..fb0ee48
--- /dev/null
+++ b/README_SUSE.md
@@ -0,0 +1,230 @@
+# Abstract
+
+Docker is a lightweight "virtualization" method to run multiple virtual units
+(containers, akin to “chroot”) simultaneously on a single control host.
+Containers are isolated with Kernel Control Groups (cgroups) and Kernel Namespaces.
+
+Docker provides operating-system-level virtualization, where the Kernel
+controls the isolated containers. With other full virtualization solutions
+like Xen, KVM, or libvirt the processor simulates a complete hardware
+environment and controls its virtual machines.
+
+# Terminology
+
+## chroot
+
+A change root (chroot, or change root jail) is a section in the file system
+which is isolated from the rest of the file system. For this purpose, the chroot
+command is used to change the root of the file system. A program which is
+executed in such a “chroot jail” cannot access files outside the designated
+directory tree.
+
+## cgroups
+
+Kernel Control Groups (commonly referred to as just “cgroups”) are a Kernel
+feature that allows aggregating or partitioning tasks (processes) and all their
+children into hierarchically organized groups to isolate resources.
+
+## Image
+
+A "virtual machine" on the host server that can run any Linux system, for
+example openSUSE, SUSE Linux Enterprise Desktop, or SUSE Linux Enterprise Server.
+
+A Docker image is made up of a series of layers built one over the other. Each layer
+corresponds to a permanent change committed from a container to the image.
+
+For more details check out [Docker's official documentation](http://docs.docker.com/terms/image/).
+
+## Image Name
+
+A name that refers to an image. The name is used by the docker commands.
+
+## Container
+
+A running Docker Image.
+
+## Container ID
+
+An ID that refers to a container. The ID is used by the docker commands.
+
+## TAG
+
+A string associated with an Image.
 It is commonly used to identify a specific version
+of an Image (like tags in version control systems). It is also possible to refer
+to the same Image with different TAGs.
+
+## Kernel Namespaces
+
+A Kernel feature to isolate some resources like network, users, and others for
+a group of processes.
+
+## Docker Host Server
+
+The system that runs the Docker daemon, provides the images, and the management
+control capabilities through cgroups.
+
+
+# Overview
+
+Docker is a platform that allows developers and sysadmins to manage the complete
+lifecycle of images.
+
+Docker makes it incredibly easy to build, ship, and run images containing
+applications.
+
+Benefits of Docker:
+
+ * Isolating applications and operating systems through containers.
+ * Providing nearly native performance as Docker manages allocation of resources
+ in real-time.
+ * Controlling network interfaces and applying resources inside containers through cgroups.
+ * Versioning of images.
+ * Building images based on existing ones.
+ * Sharing/storing on [public](http://docs.docker.com/docker-hub/) or
+ [private](http://docs.docker.com/userguide/dockerrepos/#private-repositories)
+ repositories.
+
+Limitations of Docker:
+
+ * All Docker containers run inside the host system's Kernel, rather than on
+ a Kernel of their own.
+ * Only allows Linux "guest" operating systems.
+ * Docker is not a full virtualization stack like Xen, KVM, or libvirt.
+ * Security depends on the host system. Refer to the [official documentation](http://docs.docker.com/articles/security/)
+ for more details.
+
+## Container drivers
+
+Docker has different backend drivers to handle the containers. The recommended
+one is [libcontainer](https://github.com/docker/libcontainer), which is also the
+default choice. This driver provides direct access to cgroups.
+
+The Docker package also ships an LXC driver, which handles containers using the
+LXC tools.
+
+At the time of writing, upstream is working on a `libvirt-lxc` driver.
+
+## Storage drivers
+
+Docker supports different storage drivers:
+
+ * `vfs`: this driver is automatically used when the Docker host filesystem
+ does not support copy-on-write. This is a simple driver which does not offer
+ some of the advantages of Docker (like sharing layers, more on that in the
+ next sections). It is highly reliable but also slow.
+ * `devicemapper`: this driver relies on the device-mapper thin provisioning
+ module. It supports copy-on-write, hence it offers all the advantages of
+ Docker.
+ * `btrfs`: this driver relies on Btrfs to provide all the features required
+ by Docker. To use this driver the `/var/lib/docker` directory must be on a
+ btrfs filesystem.
+ * `AUFS`: this driver relies on the AUFS union filesystem. Neither the upstream
+ kernel nor the SUSE one supports this filesystem. Hence the AUFS driver is
+ not built into the SUSE Docker package.
+
+It is possible to specify which driver to use by changing the value of the
+`DOCKER_OPTS` variable defined inside of the `/etc/sysconfig/docker` file.
+This can be done either manually or using YaST by browsing to:
+ * System
+ * /etc/sysconfig Editor
+ * System
+ * Management
+ * DOCKER_OPTS
+menu and entering the `-s storage_driver` string.
+
+For example, to force the usage of the `devicemapper` driver
+enter the following text:
+```
+DOCKER_OPTS="-s devicemapper"
+```
+
+It is recommended to have `/var/lib/docker` mounted on a separate filesystem
+so that a corruption of that filesystem does not affect the Docker host OS.
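To check which storage driver a running daemon actually selected, you can ask the daemon itself. A small sketch using the Docker Engine API Go client follows; `docker info` on the command line reports the same field:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Connect to the local daemon socket using the standard environment
	// defaults (DOCKER_HOST and friends).
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Info reports daemon-wide settings, including the storage driver
	// selected at startup.
	info, err := cli.Info(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("storage driver:", info.Driver)
}
```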
+
+# Setting up a Docker host
+
+Prepare the host:
+
+ 1. Install the `docker` package.
+ 2. Automatically start the Docker daemon at boot:
+ `sudo systemctl enable docker`
+ 3. Start the Docker daemon:
+ `sudo systemctl start docker`
+
+The Docker daemon listens on a local socket which is accessible only by the `root`
+user and by the members of the `docker` group.
+
+The `docker` group is automatically created at package installation time. To
+allow a certain user to connect to the local Docker daemon use the following
+command:
+
+```
+sudo /usr/sbin/usermod -aG docker <username>
+```
+
+The user will be able to communicate with the local Docker daemon upon their next
+login.
+
+## Networking
+
+If you want your containers to be able to access the external network you must
+enable the `net.ipv4.ip_forward` setting.
+This can be done using YaST by browsing to the
+`Network Devices -> Network Settings -> Routing` menu and ensuring that the
+`Enable IPv4 Forwarding` box is checked.
+
+This option cannot be changed when networking is handled by the Network Manager.
+In such cases the `/etc/sysconfig/SuSEfirewall2` file needs to be edited by
+hand to ensure the `FW_ROUTE` flag is set to `yes` like so:
+
+```
+ FW_ROUTE="yes"
+```
+
+
+# Basic Docker operations
+
+Images can be pulled from [Docker's central index](http://index.docker.io) using
+the following command:
+
+```
+docker pull <image_name>
+```
+
+Containers can be started using the `docker run` command.
+
+Please refer to the [official documentation](http://docs.docker.com/)
+for more details.
+
+
+# Building Docker containers using KIWI
+
+Starting from version 5.06.8, KIWI can be used to build Docker images.
+Please refer to KIWI's [official documentation](https://doc.opensuse.org/projects/kiwi/doc/#chap.lxc).
+The official `kiwi-doc` package contains examples of Docker images.
+
+## Docker build system versus KIWI
+
+Docker has an [internal build system](http://docs.docker.com/reference/builder/)
+which makes it incredibly easy to create new images based on existing ones.
+
+Some users might be confused about what to use. The right approach is to build
+the [base images](http://docs.docker.com/terms/image/#base-image-def) using KIWI
+and then use them as foundation blocks inside of your Docker build system.
+
+That has two advantages:
+
+ 1. Be able to use Docker-specific directives (like `ENTRYPOINT`, `EXPOSE`, ...).
+ 2. Be able to reuse already existing layers.
+
+Sharing the common layers between different images makes it possible to:
+
+ * Use less disk space on the Docker hosts.
+ * Make the deployments faster: only the requested layers are sent over the
+ network (it is like upgrading installed packages using delta rpms).
+ * Take full advantage of caching while building Docker images: this will result
+ in faster executions of the `docker build` command.
+
+To recap: KIWI is not intended as a replacement for Docker's build system;
+rather, it complements it.
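To see the layer sharing described above in practice, you can list the layers that make up a local image. A hedged sketch using the Engine API Go client; the image name is only an example:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// ImageHistory returns one entry per layer; layers with the same ID
	// are stored once on disk and shared by every image that uses them.
	layers, err := cli.ImageHistory(context.Background(), "opensuse/leap:latest")
	if err != nil {
		panic(err)
	}
	for _, l := range layers {
		fmt.Printf("%-20s %10d bytes  %s\n", l.ID, l.Size, l.CreatedBy)
	}
}
```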
+
diff --git a/_service b/_service
new file mode 100644
index 0000000..f0ba9d6
--- /dev/null
+++ b/_service
@@ -0,0 +1,22 @@
+<services>
+  <service name="tar_scm" mode="disabled">
+    <param name="url">https://github.com/moby/moby.git</param>
+    <param name="scm">git</param>
+    <param name="exclude">.git</param>
+    <param name="versionformat">26.1.5_ce_%h</param>
+    <param name="revision">v26.1.5</param>
+    <param name="filename">docker</param>
+  </service>
+  <service name="tar_scm" mode="disabled">
+    <param name="url">https://github.com/docker/cli.git</param>
+    <param name="scm">git</param>
+    <param name="exclude">.git</param>
+    <param name="versionformat">26.1.5_ce</param>
+    <param name="revision">v26.1.5</param>
+    <param name="filename">docker-cli</param>
+  </service>
+  <service name="recompress" mode="disabled">
+    <param name="file">docker-*.tar</param>
+    <param name="compression">xz</param>
+  </service>
+</services>
diff --git a/cli-0001-docs-include-required-tools-in-source-tree.patch b/cli-0001-docs-include-required-tools-in-source-tree.patch
new file mode 100644
index 0000000..2facc4f
--- /dev/null
+++ b/cli-0001-docs-include-required-tools-in-source-tree.patch
@@ -0,0 +1,23382 @@
+From 3701285f1cf678dda730e3f9a4860d74ca54057d Mon Sep 17 00:00:00 2001
+From: danishprakash
+Date: Mon, 12 Feb 2024 18:07:06 +0530
+Subject: [PATCH] docs: include required tools in source tree
+
+In order to be able to build the documentation without internet access
+(as is required by some distribution build systems), all of the source
+code needed for the build needs to be available in the source tarball.
+
+This used to be possible with the docker-cli sources but was
+accidentally broken with some CI changes that switched to downloading
+the tools (by modifying go.mod as part of the docs build script).
+
+This pattern also made documentation builds less reproducible since the
+tool version used was not based on the source code version.
+
+Fixes: commit 7dc35c03fca5 ("validate manpages target")
+Fixes: commit a650f4ddd008 ("switch to cli-docs-tool for yaml docs generation")
+Signed-off-by: Aleksa Sarai
+---
+ docs/generate/go.mod | 13 -
+ docs/generate/tools.go | 8 -
+ import.go | 17 +
+ man/tools.go | 11 -
+ scripts/docs/generate-man.sh | 33 +-
+ scripts/docs/generate-md.sh | 36 +-
+ scripts/docs/generate-yaml.sh | 29 +-
+ vendor.mod | 5 +
+ vendor.sum | 4 +
+ .../cpuguy83/go-md2man/v2/.gitignore | 2 +
+ .../cpuguy83/go-md2man/v2/.golangci.yml | 6 +
+ .../cpuguy83/go-md2man/v2/Dockerfile | 20 +
+ .../cpuguy83/go-md2man/v2/LICENSE.md | 21 +
+ .../github.com/cpuguy83/go-md2man/v2/Makefile | 35 +
+ .../cpuguy83/go-md2man/v2/README.md | 15 +
+ .../cpuguy83/go-md2man/v2/go-md2man.1.md | 28 +
+ .../cpuguy83/go-md2man/v2/md2man.go | 53 +
+ .../cpuguy83/go-md2man/v2/md2man/md2man.go | 16 +
+ .../cpuguy83/go-md2man/v2/md2man/roff.go | 348 ++
+ .../docker/cli-docs-tool/.dockerignore | 2 +
+ .../docker/cli-docs-tool/.gitignore | 2 +
+ .../docker/cli-docs-tool/.golangci.yml | 37 +
+ .../docker/cli-docs-tool/Dockerfile | 86 +
+ .../github.com/docker/cli-docs-tool/LICENSE | 202 ++
+ .../github.com/docker/cli-docs-tool/README.md | 67 +
+ .../cli-docs-tool/annotation/annotation.go | 25 +
+ .../docker/cli-docs-tool/clidocstool.go | 123 +
+ .../docker/cli-docs-tool/clidocstool_md.go | 280 ++
+ .../docker/cli-docs-tool/clidocstool_yaml.go | 435 +++
+ .../docker/cli-docs-tool/docker-bake.hcl | 51 +
+ .../docker/cli-docs-tool/markdown.go | 87 +
+ .../russross/blackfriday/v2/.gitignore | 8 +
+ .../russross/blackfriday/v2/.travis.yml | 17 +
+ .../russross/blackfriday/v2/LICENSE.txt | 29 +
+ .../russross/blackfriday/v2/README.md | 335 ++
+ .../russross/blackfriday/v2/block.go | 1612 +++++++++
+ .../github.com/russross/blackfriday/v2/doc.go | 46 +
+ .../russross/blackfriday/v2/entities.go | 2236 ++++++++++++
+ .../github.com/russross/blackfriday/v2/esc.go | 70 +
+ .../russross/blackfriday/v2/html.go | 952 ++++++
+ .../russross/blackfriday/v2/inline.go | 1228 +++++++
+ .../russross/blackfriday/v2/markdown.go | 950 ++++++
+ .../russross/blackfriday/v2/node.go | 360 ++
+ .../russross/blackfriday/v2/smartypants.go | 457 +++
+ vendor/github.com/spf13/cobra/doc/man_docs.go | 246 ++ + vendor/github.com/spf13/cobra/doc/md_docs.go | 158 + + .../github.com/spf13/cobra/doc/rest_docs.go | 186 + + vendor/github.com/spf13/cobra/doc/util.go | 52 + + .../github.com/spf13/cobra/doc/yaml_docs.go | 175 + + vendor/gopkg.in/yaml.v3/LICENSE | 50 + + vendor/gopkg.in/yaml.v3/NOTICE | 13 + + vendor/gopkg.in/yaml.v3/README.md | 150 + + vendor/gopkg.in/yaml.v3/apic.go | 747 ++++ + vendor/gopkg.in/yaml.v3/decode.go | 1000 ++++++ + vendor/gopkg.in/yaml.v3/emitterc.go | 2020 +++++++++++ + vendor/gopkg.in/yaml.v3/encode.go | 577 ++++ + vendor/gopkg.in/yaml.v3/parserc.go | 1258 +++++++ + vendor/gopkg.in/yaml.v3/readerc.go | 434 +++ + vendor/gopkg.in/yaml.v3/resolve.go | 326 ++ + vendor/gopkg.in/yaml.v3/scannerc.go | 3038 +++++++++++++++++ + vendor/gopkg.in/yaml.v3/sorter.go | 134 + + vendor/gopkg.in/yaml.v3/writerc.go | 48 + + vendor/gopkg.in/yaml.v3/yaml.go | 698 ++++ + vendor/gopkg.in/yaml.v3/yamlh.go | 807 +++++ + vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 ++ + vendor/modules.txt | 15 + + 66 files changed, 22631 insertions(+), 96 deletions(-) + delete mode 100644 docs/generate/go.mod + delete mode 100644 docs/generate/tools.go + create mode 100644 import.go + delete mode 100644 man/tools.go + create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/.gitignore + create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/.golangci.yml + create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile + create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md + create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/Makefile + create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/README.md + create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md + create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man.go + create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go + create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go + create mode 100644 vendor/github.com/docker/cli-docs-tool/.dockerignore + create mode 100644 vendor/github.com/docker/cli-docs-tool/.gitignore + create mode 100644 vendor/github.com/docker/cli-docs-tool/.golangci.yml + create mode 100644 vendor/github.com/docker/cli-docs-tool/Dockerfile + create mode 100644 vendor/github.com/docker/cli-docs-tool/LICENSE + create mode 100644 vendor/github.com/docker/cli-docs-tool/README.md + create mode 100644 vendor/github.com/docker/cli-docs-tool/annotation/annotation.go + create mode 100644 vendor/github.com/docker/cli-docs-tool/clidocstool.go + create mode 100644 vendor/github.com/docker/cli-docs-tool/clidocstool_md.go + create mode 100644 vendor/github.com/docker/cli-docs-tool/clidocstool_yaml.go + create mode 100644 vendor/github.com/docker/cli-docs-tool/docker-bake.hcl + create mode 100644 vendor/github.com/docker/cli-docs-tool/markdown.go + create mode 100644 vendor/github.com/russross/blackfriday/v2/.gitignore + create mode 100644 vendor/github.com/russross/blackfriday/v2/.travis.yml + create mode 100644 vendor/github.com/russross/blackfriday/v2/LICENSE.txt + create mode 100644 vendor/github.com/russross/blackfriday/v2/README.md + create mode 100644 vendor/github.com/russross/blackfriday/v2/block.go + create mode 100644 vendor/github.com/russross/blackfriday/v2/doc.go + create mode 100644 vendor/github.com/russross/blackfriday/v2/entities.go + create mode 100644 vendor/github.com/russross/blackfriday/v2/esc.go + create mode 100644 vendor/github.com/russross/blackfriday/v2/html.go + 
create mode 100644 vendor/github.com/russross/blackfriday/v2/inline.go + create mode 100644 vendor/github.com/russross/blackfriday/v2/markdown.go + create mode 100644 vendor/github.com/russross/blackfriday/v2/node.go + create mode 100644 vendor/github.com/russross/blackfriday/v2/smartypants.go + create mode 100644 vendor/github.com/spf13/cobra/doc/man_docs.go + create mode 100644 vendor/github.com/spf13/cobra/doc/md_docs.go + create mode 100644 vendor/github.com/spf13/cobra/doc/rest_docs.go + create mode 100644 vendor/github.com/spf13/cobra/doc/util.go + create mode 100644 vendor/github.com/spf13/cobra/doc/yaml_docs.go + create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE + create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE + create mode 100644 vendor/gopkg.in/yaml.v3/README.md + create mode 100644 vendor/gopkg.in/yaml.v3/apic.go + create mode 100644 vendor/gopkg.in/yaml.v3/decode.go + create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go + create mode 100644 vendor/gopkg.in/yaml.v3/encode.go + create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go + create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go + create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go + create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go + create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go + create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go + create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go + create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go + create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go + +diff --git a/docs/generate/go.mod b/docs/generate/go.mod +deleted file mode 100644 +index d62ff455713a..000000000000 +--- a/docs/generate/go.mod ++++ /dev/null +@@ -1,13 +0,0 @@ +-module github.com/docker/cli/docs/generate +- +-// dummy go.mod to avoid dealing with dependencies specific +-// to docs generation and not really part of the project. +- +-go 1.16 +- +-//require ( +-// github.com/docker/cli v0.0.0+incompatible +-// github.com/docker/cli-docs-tool v0.5.0 +-//) +-// +-//replace github.com/docker/cli v0.0.0+incompatible => ../../ +diff --git a/docs/generate/tools.go b/docs/generate/tools.go +deleted file mode 100644 +index 47510bc49a89..000000000000 +--- a/docs/generate/tools.go ++++ /dev/null +@@ -1,8 +0,0 @@ +-//go:build tools +-// +build tools +- +-package main +- +-import ( +- _ "github.com/docker/cli-docs-tool" +-) +diff --git a/import.go b/import.go +new file mode 100644 +index 000000000000..662a6055146c +--- /dev/null ++++ b/import.go +@@ -0,0 +1,17 @@ ++// This is only used to define imports we need for doc generation. ++ ++//go:build never ++// +build never ++ ++package cli ++ ++import ( ++ // Used for md and yaml doc generation. ++ _ "github.com/docker/cli-docs-tool" ++ ++ // Used for man page generation. 
++ _ "github.com/cpuguy83/go-md2man/v2" ++ _ "github.com/spf13/cobra" ++ _ "github.com/spf13/cobra/doc" ++ _ "github.com/spf13/pflag" ++) +diff --git a/man/tools.go b/man/tools.go +deleted file mode 100644 +index 3cafe6533aff..000000000000 +--- a/man/tools.go ++++ /dev/null +@@ -1,11 +0,0 @@ +-//go:build tools +-// +build tools +- +-package main +- +-import ( +- _ "github.com/cpuguy83/go-md2man/v2" +- _ "github.com/spf13/cobra" +- _ "github.com/spf13/cobra/doc" +- _ "github.com/spf13/pflag" +-) +diff --git a/scripts/docs/generate-man.sh b/scripts/docs/generate-man.sh +index 12a4b81199db..1e12a95e9c9a 100755 +--- a/scripts/docs/generate-man.sh ++++ b/scripts/docs/generate-man.sh +@@ -1,35 +1,22 @@ + #!/usr/bin/env bash + +-set -eu +- +-: "${MD2MAN_VERSION=v2.0.3}" ++set -Eeuo pipefail + + export GO111MODULE=auto + +-function clean { +- rm -rf "$buildir" ++# temporary "go.mod" to make -modfile= work ++touch go.mod ++ ++function clean() { ++ rm -f "$(pwd)/go.mod" + } + +-buildir=$(mktemp -d -t docker-cli-docsgen.XXXXXXXXXX) + trap clean EXIT + +-( +- set -x +- cp -r . "$buildir/" +- cd "$buildir" +- # init dummy go.mod +- ./scripts/vendor init +- # install go-md2man and copy man/tools.go in root folder +- # to be able to fetch the required dependencies +- go mod edit -modfile=vendor.mod -require=github.com/cpuguy83/go-md2man/v2@${MD2MAN_VERSION} +- cp man/tools.go . +- # update vendor +- ./scripts/vendor update +- # build gen-manpages +- go build -mod=vendor -modfile=vendor.mod -tags manpages -o /tmp/gen-manpages ./man/generate.go +- # build go-md2man +- go build -mod=vendor -modfile=vendor.mod -o /tmp/go-md2man ./vendor/github.com/cpuguy83/go-md2man/v2 +-) ++# build gen-manpages ++go build -mod=vendor -modfile=vendor.mod -tags manpages -o /tmp/gen-manpages ./man/generate.go ++# build go-md2man ++go build -mod=vendor -modfile=vendor.mod -o /tmp/go-md2man ./vendor/github.com/cpuguy83/go-md2man/v2 + + mkdir -p man/man1 + (set -x ; /tmp/gen-manpages --root "." --target "$(pwd)/man/man1") +diff --git a/scripts/docs/generate-md.sh b/scripts/docs/generate-md.sh +index 4caa01eaed23..0af86843bbe4 100755 +--- a/scripts/docs/generate-md.sh ++++ b/scripts/docs/generate-md.sh +@@ -1,33 +1,29 @@ + #!/usr/bin/env bash + +-set -eu +- +-: "${CLI_DOCS_TOOL_VERSION=v0.7.0}" ++set -Eeuo pipefail + + export GO111MODULE=auto + ++# temporary "go.mod" to make -modfile= work ++touch go.mod ++ + function clean { +- rm -rf "$buildir" ++ rm -f "$(pwd)/go.mod" ++ if [ -f "$(pwd)/docs/reference/commandline/docker.md" ]; then ++ mv "$(pwd)/docs/reference/commandline/docker.md" "$(pwd)/docs/reference/commandline/cli.md" ++ fi + } + +-buildir=$(mktemp -d -t docker-cli-docsgen.XXXXXXXXXX) + trap clean EXIT + +-( +- set -x +- cp -r . "$buildir/" +- cd "$buildir" +- # init dummy go.mod +- ./scripts/vendor init +- # install cli-docs-tool and copy docs/tools.go in root folder +- # to be able to fetch the required depedencies +- go mod edit -modfile=vendor.mod -require=github.com/docker/cli-docs-tool@${CLI_DOCS_TOOL_VERSION} +- cp docs/generate/tools.go . +- # update vendor +- ./scripts/vendor update +- # build docsgen +- go build -mod=vendor -modfile=vendor.mod -tags docsgen -o /tmp/docsgen ./docs/generate/generate.go +-) ++# build docsgen ++go build -mod=vendor -modfile=vendor.mod -tags docsgen -o /tmp/docsgen ./docs/generate/generate.go ++ ++# yaml generation on docs repo needs the cli.md file: https://github.com/docker/cli/pull/3924#discussion_r1059986605 ++# but markdown generation docker.md atm. 
While waiting for a fix in cli-docs-tool ++# we need to first move the cli.md file to docker.md, do the generation and ++# then move it back in trap handler. ++mv "$(pwd)/docs/reference/commandline/cli.md" "$(pwd)/docs/reference/commandline/docker.md" + + ( + set -x +diff --git a/scripts/docs/generate-yaml.sh b/scripts/docs/generate-yaml.sh +index 0d67c5e5bb09..7d98e161df5d 100755 +--- a/scripts/docs/generate-yaml.sh ++++ b/scripts/docs/generate-yaml.sh +@@ -1,33 +1,20 @@ + #!/usr/bin/env bash + +-set -eu +- +-: "${CLI_DOCS_TOOL_VERSION=v0.7.0}" ++set -Eeuo pipefail + + export GO111MODULE=auto + +-function clean { +- rm -rf "$buildir" ++# temporary "go.mod" to make -modfile= work ++touch go.mod ++ ++function clean() { ++ rm -f "$(pwd)/go.mod" + } + +-buildir=$(mktemp -d -t docker-cli-docsgen.XXXXXXXXXX) + trap clean EXIT + +-( +- set -x +- cp -r . "$buildir/" +- cd "$buildir" +- # init dummy go.mod +- ./scripts/vendor init +- # install cli-docs-tool and copy docs/tools.go in root folder +- # to be able to fetch the required depedencies +- go mod edit -modfile=vendor.mod -require=github.com/docker/cli-docs-tool@${CLI_DOCS_TOOL_VERSION} +- cp docs/generate/tools.go . +- # update vendor +- ./scripts/vendor update +- # build docsgen +- go build -mod=vendor -modfile=vendor.mod -tags docsgen -o /tmp/docsgen ./docs/generate/generate.go +-) ++# build docsgen ++go build -mod=vendor -modfile=vendor.mod -tags docsgen -o /tmp/docsgen ./docs/generate/generate.go + + mkdir -p docs/yaml + set -x +diff --git a/vendor.mod b/vendor.mod +index 3bc5ce327f0f..a654f78703d6 100644 +--- a/vendor.mod ++++ b/vendor.mod +@@ -11,6 +11,7 @@ require ( + github.com/containerd/platforms v0.2.0 + github.com/creack/pty v1.1.21 + github.com/distribution/reference v0.5.0 ++ github.com/docker/cli-docs-tool v0.6.0 + github.com/docker/distribution v2.8.3+incompatible + github.com/docker/docker v26.1.4-0.20240605103321-de5c9cf0b96e+incompatible // 26.1 branch (v26.1.4-dev) + github.com/docker/docker-credential-helpers v0.8.1 +@@ -53,6 +54,8 @@ require ( + tags.cncf.io/container-device-interface v0.7.2 + ) + ++require github.com/cpuguy83/go-md2man/v2 v2.0.3 ++ + require ( + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect +@@ -83,6 +86,7 @@ require ( + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/rivo/uniseg v0.2.0 // indirect ++ github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + go.etcd.io/etcd/raft/v3 v3.5.6 // indirect +@@ -96,4 +100,5 @@ require ( + google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/grpc v1.60.1 // indirect + google.golang.org/protobuf v1.33.0 // indirect ++ gopkg.in/yaml.v3 v3.0.1 // indirect + ) +diff --git a/vendor.sum b/vendor.sum +index 6a31c9b2cf62..a0905e657c37 100644 +--- a/vendor.sum ++++ b/vendor.sum +@@ -46,6 +46,7 @@ github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3 + github.com/containerd/platforms v0.2.0 h1:clGNvVIcY3k39VJSYdFGohI1b3bP/eeBUVR5+XA28oo= + github.com/containerd/platforms v0.2.0/go.mod h1:XOM2BS6kN6gXafPLg80V6y/QUib+xoLyC3qVmHzibko= + github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= ++github.com/cpuguy83/go-md2man/v2 v2.0.3 
h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= + github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= + github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= + github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= +@@ -56,6 +57,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs + github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= + github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= + github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= ++github.com/docker/cli-docs-tool v0.6.0 h1:Z9x10SaZgFaB6jHgz3OWooynhSa40CsWkpe5hEnG/qA= ++github.com/docker/cli-docs-tool v0.6.0/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbIwTusYpzCXS/o= + github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= + github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= + github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +@@ -241,6 +244,7 @@ github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= + github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= + github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= + github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= ++github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= + github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= + github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/.gitignore b/vendor/github.com/cpuguy83/go-md2man/v2/.gitignore +new file mode 100644 +index 000000000000..30f97c3d73ab +--- /dev/null ++++ b/vendor/github.com/cpuguy83/go-md2man/v2/.gitignore +@@ -0,0 +1,2 @@ ++go-md2man ++bin +diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/.golangci.yml b/vendor/github.com/cpuguy83/go-md2man/v2/.golangci.yml +new file mode 100644 +index 000000000000..71f073f3c6b9 +--- /dev/null ++++ b/vendor/github.com/cpuguy83/go-md2man/v2/.golangci.yml +@@ -0,0 +1,6 @@ ++# For documentation, see https://golangci-lint.run/usage/configuration/ ++ ++linters: ++ enable: ++ - gofumpt ++ +diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile b/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile +new file mode 100644 +index 000000000000..7181c5306f41 +--- /dev/null ++++ b/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile +@@ -0,0 +1,20 @@ ++ARG GO_VERSION=1.18 ++ARG GO_IMAGE=golang:${GO_VERSION} ++ ++FROM --platform=$BUILDPLATFORM $GO_IMAGE AS build ++COPY . 
/go/src/github.com/cpuguy83/go-md2man ++WORKDIR /go/src/github.com/cpuguy83/go-md2man ++ARG TARGETOS ++ARG TARGETARCH ++ARG TARGETVARIANT ++RUN \ ++ export GOOS="${TARGETOS}"; \ ++ export GOARCH="${TARGETARCH}"; \ ++ if [ "${TARGETARCH}" = "arm" ] && [ "${TARGETVARIANT}" ]; then \ ++ export GOARM="${TARGETVARIANT#v}"; \ ++ fi; \ ++ CGO_ENABLED=0 go build ++ ++FROM scratch ++COPY --from=build /go/src/github.com/cpuguy83/go-md2man/go-md2man /go-md2man ++ENTRYPOINT ["/go-md2man"] +diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md +new file mode 100644 +index 000000000000..1cade6cef6a1 +--- /dev/null ++++ b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md +@@ -0,0 +1,21 @@ ++The MIT License (MIT) ++ ++Copyright (c) 2014 Brian Goff ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in all ++copies or substantial portions of the Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++SOFTWARE. +diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/Makefile b/vendor/github.com/cpuguy83/go-md2man/v2/Makefile +new file mode 100644 +index 000000000000..437fc9997926 +--- /dev/null ++++ b/vendor/github.com/cpuguy83/go-md2man/v2/Makefile +@@ -0,0 +1,35 @@ ++GO111MODULE ?= on ++LINTER_BIN ?= golangci-lint ++ ++export GO111MODULE ++ ++.PHONY: ++build: bin/go-md2man ++ ++.PHONY: clean ++clean: ++ @rm -rf bin/* ++ ++.PHONY: test ++test: ++ @go test $(TEST_FLAGS) ./... ++ ++bin/go-md2man: actual_build_flags := $(BUILD_FLAGS) -o bin/go-md2man ++bin/go-md2man: bin ++ @CGO_ENABLED=0 go build $(actual_build_flags) ++ ++bin: ++ @mkdir ./bin ++ ++.PHONY: mod ++mod: ++ @go mod tidy ++ ++.PHONY: check-mod ++check-mod: # verifies that module changes for go.mod and go.sum are checked in ++ @hack/ci/check_mods.sh ++ ++.PHONY: vendor ++vendor: mod ++ @go mod vendor -v ++ +diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/README.md b/vendor/github.com/cpuguy83/go-md2man/v2/README.md +new file mode 100644 +index 000000000000..0e30d341483c +--- /dev/null ++++ b/vendor/github.com/cpuguy83/go-md2man/v2/README.md +@@ -0,0 +1,15 @@ ++go-md2man ++========= ++ ++Converts markdown into roff (man pages). ++ ++Uses blackfriday to process markdown into man pages. ++ ++### Usage ++ ++./md2man -in /path/to/markdownfile.md -out /manfile/output/path ++ ++### How to contribute ++ ++We use go modules to manage dependencies. ++As such you must be using at lest go1.11. 
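Since these sources are vendored precisely so that the docs build can call them offline, here is a minimal sketch of driving the vendored package programmatically; the `Render` signature is the one added in `md2man/md2man.go` below, and the markdown snippet is just an example:

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// Render converts a markdown document into roff suitable for man(1).
	src := []byte("go-md2man 1 \"July 2024\"\n==========\n\n# NAME\ngo-md2man - convert markdown into man pages\n")
	fmt.Printf("%s", md2man.Render(src))
}
```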
+diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md b/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md +new file mode 100644 +index 000000000000..aa4587e279ff +--- /dev/null ++++ b/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md +@@ -0,0 +1,28 @@ ++go-md2man 1 "January 2015" go-md2man "User Manual" ++================================================== ++ ++# NAME ++go-md2man - Convert markdown files into manpages ++ ++# SYNOPSIS ++**go-md2man** [**-in**=*/path/to/md/file*] [**-out**=*/path/to/output*] ++ ++# DESCRIPTION ++**go-md2man** converts standard markdown formatted documents into manpages. It is ++written purely in Go so as to reduce dependencies on 3rd party libs. ++ ++By default, the input is stdin and the output is stdout. ++ ++# EXAMPLES ++Convert the markdown file *go-md2man.1.md* into a manpage: ++``` ++go-md2man < go-md2man.1.md > go-md2man.1 ++``` ++ ++Same, but using command line arguments instead of shell redirection: ++``` ++go-md2man -in=go-md2man.1.md -out=go-md2man.1 ++``` ++ ++# HISTORY ++January 2015, Originally compiled by Brian Goff (cpuguy83@gmail.com). +diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man.go +new file mode 100644 +index 000000000000..4ff873b8e767 +--- /dev/null ++++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man.go +@@ -0,0 +1,53 @@ ++package main ++ ++import ( ++ "flag" ++ "fmt" ++ "io/ioutil" ++ "os" ++ ++ "github.com/cpuguy83/go-md2man/v2/md2man" ++) ++ ++var ( ++ inFilePath = flag.String("in", "", "Path to file to be processed (default: stdin)") ++ outFilePath = flag.String("out", "", "Path to output processed file (default: stdout)") ++) ++ ++func main() { ++ var err error ++ flag.Parse() ++ ++ inFile := os.Stdin ++ if *inFilePath != "" { ++ inFile, err = os.Open(*inFilePath) ++ if err != nil { ++ fmt.Println(err) ++ os.Exit(1) ++ } ++ } ++ defer inFile.Close() // nolint: errcheck ++ ++ doc, err := ioutil.ReadAll(inFile) ++ if err != nil { ++ fmt.Println(err) ++ os.Exit(1) ++ } ++ ++ out := md2man.Render(doc) ++ ++ outFile := os.Stdout ++ if *outFilePath != "" { ++ outFile, err = os.Create(*outFilePath) ++ if err != nil { ++ fmt.Println(err) ++ os.Exit(1) ++ } ++ defer outFile.Close() // nolint: errcheck ++ } ++ _, err = outFile.Write(out) ++ if err != nil { ++ fmt.Println(err) ++ os.Exit(1) ++ } ++} +diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go +new file mode 100644 +index 000000000000..42bf32aab003 +--- /dev/null ++++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go +@@ -0,0 +1,16 @@ ++package md2man ++ ++import ( ++ "github.com/russross/blackfriday/v2" ++) ++ ++// Render converts a markdown document into a roff formatted document. ++func Render(doc []byte) []byte { ++ renderer := NewRoffRenderer() ++ ++ return blackfriday.Run(doc, ++ []blackfriday.Option{ ++ blackfriday.WithRenderer(renderer), ++ blackfriday.WithExtensions(renderer.GetExtensions()), ++ }...) 
++} +diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +new file mode 100644 +index 000000000000..4b19188d90fd +--- /dev/null ++++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +@@ -0,0 +1,348 @@ ++package md2man ++ ++import ( ++ "bytes" ++ "fmt" ++ "io" ++ "os" ++ "strings" ++ ++ "github.com/russross/blackfriday/v2" ++) ++ ++// roffRenderer implements the blackfriday.Renderer interface for creating ++// roff format (manpages) from markdown text ++type roffRenderer struct { ++ extensions blackfriday.Extensions ++ listCounters []int ++ firstHeader bool ++ firstDD bool ++ listDepth int ++} ++ ++const ( ++ titleHeader = ".TH " ++ topLevelHeader = "\n\n.SH " ++ secondLevelHdr = "\n.SH " ++ otherHeader = "\n.SS " ++ crTag = "\n" ++ emphTag = "\\fI" ++ emphCloseTag = "\\fP" ++ strongTag = "\\fB" ++ strongCloseTag = "\\fP" ++ breakTag = "\n.br\n" ++ paraTag = "\n.PP\n" ++ hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" ++ linkTag = "\n\\[la]" ++ linkCloseTag = "\\[ra]" ++ codespanTag = "\\fB" ++ codespanCloseTag = "\\fR" ++ codeTag = "\n.EX\n" ++ codeCloseTag = "\n.EE\n" ++ quoteTag = "\n.PP\n.RS\n" ++ quoteCloseTag = "\n.RE\n" ++ listTag = "\n.RS\n" ++ listCloseTag = "\n.RE\n" ++ dtTag = "\n.TP\n" ++ dd2Tag = "\n" ++ tableStart = "\n.TS\nallbox;\n" ++ tableEnd = ".TE\n" ++ tableCellStart = "T{\n" ++ tableCellEnd = "\nT}\n" ++) ++ ++// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents ++// from markdown ++func NewRoffRenderer() *roffRenderer { // nolint: golint ++ var extensions blackfriday.Extensions ++ ++ extensions |= blackfriday.NoIntraEmphasis ++ extensions |= blackfriday.Tables ++ extensions |= blackfriday.FencedCode ++ extensions |= blackfriday.SpaceHeadings ++ extensions |= blackfriday.Footnotes ++ extensions |= blackfriday.Titleblock ++ extensions |= blackfriday.DefinitionLists ++ return &roffRenderer{ ++ extensions: extensions, ++ } ++} ++ ++// GetExtensions returns the list of extensions used by this renderer implementation ++func (r *roffRenderer) GetExtensions() blackfriday.Extensions { ++ return r.extensions ++} ++ ++// RenderHeader handles outputting the header at document start ++func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { ++ // disable hyphenation ++ out(w, ".nh\n") ++} ++ ++// RenderFooter handles outputting the footer at the document end; the roff ++// renderer has no footer information ++func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { ++} ++ ++// RenderNode is called for each node in a markdown document; based on the node ++// type the equivalent roff output is sent to the writer ++func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { ++ walkAction := blackfriday.GoToNext ++ ++ switch node.Type { ++ case blackfriday.Text: ++ escapeSpecialChars(w, node.Literal) ++ case blackfriday.Softbreak: ++ out(w, crTag) ++ case blackfriday.Hardbreak: ++ out(w, breakTag) ++ case blackfriday.Emph: ++ if entering { ++ out(w, emphTag) ++ } else { ++ out(w, emphCloseTag) ++ } ++ case blackfriday.Strong: ++ if entering { ++ out(w, strongTag) ++ } else { ++ out(w, strongCloseTag) ++ } ++ case blackfriday.Link: ++ // Don't render the link text for automatic links, because this ++ // will only duplicate the URL in the roff output. 
++ // See https://daringfireball.net/projects/markdown/syntax#autolink ++ if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) { ++ out(w, string(node.FirstChild.Literal)) ++ } ++ // Hyphens in a link must be escaped to avoid word-wrap in the rendered man page. ++ escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-") ++ out(w, linkTag+escapedLink+linkCloseTag) ++ walkAction = blackfriday.SkipChildren ++ case blackfriday.Image: ++ // ignore images ++ walkAction = blackfriday.SkipChildren ++ case blackfriday.Code: ++ out(w, codespanTag) ++ escapeSpecialChars(w, node.Literal) ++ out(w, codespanCloseTag) ++ case blackfriday.Document: ++ break ++ case blackfriday.Paragraph: ++ // roff .PP markers break lists ++ if r.listDepth > 0 { ++ return blackfriday.GoToNext ++ } ++ if entering { ++ out(w, paraTag) ++ } else { ++ out(w, crTag) ++ } ++ case blackfriday.BlockQuote: ++ if entering { ++ out(w, quoteTag) ++ } else { ++ out(w, quoteCloseTag) ++ } ++ case blackfriday.Heading: ++ r.handleHeading(w, node, entering) ++ case blackfriday.HorizontalRule: ++ out(w, hruleTag) ++ case blackfriday.List: ++ r.handleList(w, node, entering) ++ case blackfriday.Item: ++ r.handleItem(w, node, entering) ++ case blackfriday.CodeBlock: ++ out(w, codeTag) ++ escapeSpecialChars(w, node.Literal) ++ out(w, codeCloseTag) ++ case blackfriday.Table: ++ r.handleTable(w, node, entering) ++ case blackfriday.TableHead: ++ case blackfriday.TableBody: ++ case blackfriday.TableRow: ++ // no action as cell entries do all the nroff formatting ++ return blackfriday.GoToNext ++ case blackfriday.TableCell: ++ r.handleTableCell(w, node, entering) ++ case blackfriday.HTMLSpan: ++ // ignore other HTML tags ++ case blackfriday.HTMLBlock: ++ if bytes.HasPrefix(node.Literal, []byte(" ++ ++ ++`) ++ if err != nil { ++ return err ++ } ++ if err = icTpl.Execute(&icBuf, struct { ++ Command string ++ }{ ++ Command: cmd.CommandPath(), ++ }); err != nil { ++ return err ++ } ++ if err = os.WriteFile(targetPath, icBuf.Bytes(), 0644); err != nil { ++ return err ++ } ++ } else if err := copyFile(sourcePath, targetPath); err != nil { ++ return err ++ } ++ ++ content, err := os.ReadFile(targetPath) ++ if err != nil { ++ return err ++ } ++ ++ cs := string(content) ++ ++ start := strings.Index(cs, "") ++ end := strings.Index(cs, "") ++ ++ if start == -1 { ++ return fmt.Errorf("no start marker in %s", mdFile) ++ } ++ if end == -1 { ++ return fmt.Errorf("no end marker in %s", mdFile) ++ } ++ ++ out, err := mdCmdOutput(cmd, cs) ++ if err != nil { ++ return err ++ } ++ cont := cs[:start] + "" + "\n" + out + "\n" + cs[end:] ++ ++ fi, err := os.Stat(targetPath) ++ if err != nil { ++ return err ++ } ++ if err = os.WriteFile(targetPath, []byte(cont), fi.Mode()); err != nil { ++ return fmt.Errorf("failed to write %s: %w", targetPath, err) ++ } ++ ++ return nil ++} ++ ++func mdFilename(cmd *cobra.Command) string { ++ name := cmd.CommandPath() ++ if i := strings.Index(name, " "); i >= 0 { ++ name = name[i+1:] ++ } ++ return strings.ReplaceAll(name, " ", "_") + ".md" ++} ++ ++func mdMakeLink(txt, link string, f *pflag.Flag, isAnchor bool) string { ++ link = "#" + link ++ annotations, ok := f.Annotations[annotation.ExternalURL] ++ if ok && len(annotations) > 0 { ++ link = annotations[0] ++ } else { ++ if !isAnchor { ++ return txt ++ } ++ } ++ ++ return "[" + txt + "](" + link + ")" ++} ++ ++type mdTable struct { ++ out *strings.Builder ++ tabWriter *tabwriter.Writer ++} ++ ++func newMdTable(headers ...string) *mdTable { ++ w 
:= &strings.Builder{} ++ t := &mdTable{ ++ out: w, ++ // Using tabwriter.Debug, which uses "|" as separator instead of tabs, ++ // which is what we want. It's a bit of a hack, but does the job :) ++ tabWriter: tabwriter.NewWriter(w, 5, 5, 1, ' ', tabwriter.Debug), ++ } ++ t.addHeader(headers...) ++ return t ++} ++ ++func (t *mdTable) addHeader(cols ...string) { ++ t.AddRow(cols...) ++ _, _ = t.tabWriter.Write([]byte("|" + strings.Repeat(":---\t", len(cols)) + "\n")) ++} ++ ++func (t *mdTable) AddRow(cols ...string) { ++ for i := range cols { ++ cols[i] = mdEscapePipe(cols[i]) ++ } ++ _, _ = t.tabWriter.Write([]byte("| " + strings.Join(cols, "\t ") + "\t\n")) ++} ++ ++func (t *mdTable) String() string { ++ _ = t.tabWriter.Flush() ++ return adjustSep.ReplaceAllStringFunc(t.out.String()+"\n", func(in string) string { ++ return strings.ReplaceAll(in, " ", "-") ++ }) ++} ++ ++func mdCmdOutput(cmd *cobra.Command, old string) (string, error) { ++ b := &strings.Builder{} ++ ++ desc := cmd.Short ++ if cmd.Long != "" { ++ desc = cmd.Long ++ } ++ if desc != "" { ++ b.WriteString(desc + "\n\n") ++ } ++ ++ if aliases := getAliases(cmd); len(aliases) != 0 { ++ b.WriteString("### Aliases\n\n") ++ b.WriteString("`" + strings.Join(aliases, "`, `") + "`") ++ b.WriteString("\n\n") ++ } ++ ++ if len(cmd.Commands()) != 0 { ++ b.WriteString("### Subcommands\n\n") ++ table := newMdTable("Name", "Description") ++ for _, c := range cmd.Commands() { ++ if c.Hidden { ++ continue ++ } ++ table.AddRow(fmt.Sprintf("[`%s`](%s)", c.Name(), mdFilename(c)), c.Short) ++ } ++ b.WriteString(table.String() + "\n") ++ } ++ ++ // add inherited flags before checking for flags availability ++ cmd.Flags().AddFlagSet(cmd.InheritedFlags()) ++ ++ if cmd.Flags().HasAvailableFlags() { ++ b.WriteString("### Options\n\n") ++ table := newMdTable("Name", "Type", "Default", "Description") ++ cmd.Flags().VisitAll(func(f *pflag.Flag) { ++ if f.Hidden { ++ return ++ } ++ isLink := strings.Contains(old, "") ++ var name string ++ if f.Shorthand != "" { ++ name = mdMakeLink("`-"+f.Shorthand+"`", f.Name, f, isLink) ++ name += ", " ++ } ++ name += mdMakeLink("`--"+f.Name+"`", f.Name, f, isLink) ++ ++ var ftype string ++ if f.Value.Type() != "bool" { ++ ftype = "`" + f.Value.Type() + "`" ++ } ++ ++ var defval string ++ if v, ok := f.Annotations[annotation.DefaultValue]; ok && len(v) > 0 { ++ defval = v[0] ++ if cd, ok := f.Annotations[annotation.CodeDelimiter]; ok { ++ defval = strings.ReplaceAll(defval, cd[0], "`") ++ } else if cd, ok := cmd.Annotations[annotation.CodeDelimiter]; ok { ++ defval = strings.ReplaceAll(defval, cd, "`") ++ } ++ } else if f.DefValue != "" && (f.Value.Type() != "bool" && f.DefValue != "true") && f.DefValue != "[]" { ++ defval = "`" + f.DefValue + "`" ++ } ++ ++ usage := f.Usage ++ if cd, ok := f.Annotations[annotation.CodeDelimiter]; ok { ++ usage = strings.ReplaceAll(usage, cd[0], "`") ++ } else if cd, ok := cmd.Annotations[annotation.CodeDelimiter]; ok { ++ usage = strings.ReplaceAll(usage, cd, "`") ++ } ++ table.AddRow(name, ftype, defval, mdReplaceNewline(usage)) ++ }) ++ b.WriteString(table.String()) ++ } ++ ++ return b.String(), nil ++} ++ ++func mdEscapePipe(s string) string { ++ return strings.ReplaceAll(s, `|`, `\|`) ++} ++ ++func mdReplaceNewline(s string) string { ++ return nlRegexp.ReplaceAllString(s, "
") ++} +diff --git a/vendor/github.com/docker/cli-docs-tool/clidocstool_yaml.go b/vendor/github.com/docker/cli-docs-tool/clidocstool_yaml.go +new file mode 100644 +index 000000000000..523524297af4 +--- /dev/null ++++ b/vendor/github.com/docker/cli-docs-tool/clidocstool_yaml.go +@@ -0,0 +1,435 @@ ++// Copyright 2017 cli-docs-tool authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package clidocstool ++ ++import ( ++ "fmt" ++ "io" ++ "log" ++ "os" ++ "path/filepath" ++ "sort" ++ "strings" ++ ++ "github.com/docker/cli-docs-tool/annotation" ++ "github.com/spf13/cobra" ++ "github.com/spf13/pflag" ++ "gopkg.in/yaml.v3" ++) ++ ++type cmdOption struct { ++ Option string ++ Shorthand string `yaml:",omitempty"` ++ ValueType string `yaml:"value_type,omitempty"` ++ DefaultValue string `yaml:"default_value,omitempty"` ++ Description string `yaml:",omitempty"` ++ DetailsURL string `yaml:"details_url,omitempty"` // DetailsURL contains an anchor-id or link for more information on this flag ++ Deprecated bool ++ Hidden bool ++ MinAPIVersion string `yaml:"min_api_version,omitempty"` ++ Experimental bool ++ ExperimentalCLI bool ++ Kubernetes bool ++ Swarm bool ++ OSType string `yaml:"os_type,omitempty"` ++} ++ ++type cmdDoc struct { ++ Name string `yaml:"command"` ++ SeeAlso []string `yaml:"parent,omitempty"` ++ Version string `yaml:"engine_version,omitempty"` ++ Aliases string `yaml:",omitempty"` ++ Short string `yaml:",omitempty"` ++ Long string `yaml:",omitempty"` ++ Usage string `yaml:",omitempty"` ++ Pname string `yaml:",omitempty"` ++ Plink string `yaml:",omitempty"` ++ Cname []string `yaml:",omitempty"` ++ Clink []string `yaml:",omitempty"` ++ Options []cmdOption `yaml:",omitempty"` ++ InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"` ++ Example string `yaml:"examples,omitempty"` ++ Deprecated bool ++ Hidden bool ++ MinAPIVersion string `yaml:"min_api_version,omitempty"` ++ Experimental bool ++ ExperimentalCLI bool ++ Kubernetes bool ++ Swarm bool ++ OSType string `yaml:"os_type,omitempty"` ++} ++ ++// GenYamlTree creates yaml structured ref files for this command and all descendants ++// in the directory given. This function may not work ++// correctly if your command names have `-` in them. If you have `cmd` with two ++// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third` ++// it is undefined which help output will be in the file `cmd-sub-third.1`. ++func (c *Client) GenYamlTree(cmd *cobra.Command) error { ++ emptyStr := func(s string) string { return "" } ++ if err := c.loadLongDescription(cmd); err != nil { ++ return err ++ } ++ return c.genYamlTreeCustom(cmd, emptyStr) ++} ++ ++// genYamlTreeCustom creates yaml structured ref files. 
++func (c *Client) genYamlTreeCustom(cmd *cobra.Command, filePrepender func(string) string) error { ++ for _, sc := range cmd.Commands() { ++ if !sc.Runnable() && !sc.HasAvailableSubCommands() { ++ // skip non-runnable commands without subcommands ++ // but *do* generate YAML for hidden and deprecated commands ++ // the YAML will have those included as metadata, so that the ++ // documentation repository can decide whether or not to present them ++ continue ++ } ++ if err := c.genYamlTreeCustom(sc, filePrepender); err != nil { ++ return err ++ } ++ } ++ ++ // always disable the addition of [flags] to the usage ++ cmd.DisableFlagsInUseLine = true ++ ++ // The "root" command used in the generator is just a "stub", and only has a ++ // list of subcommands, but not (e.g.) global options/flags. We should fix ++ // that, so that the YAML file for the docker "root" command contains the ++ // global flags. ++ ++ // Skip the root command altogether, to prevent generating a useless ++ // YAML file for plugins. ++ if c.plugin && !cmd.HasParent() { ++ return nil ++ } ++ ++ log.Printf("INFO: Generating YAML for %q", cmd.CommandPath()) ++ basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".yaml" ++ target := filepath.Join(c.target, basename) ++ f, err := os.Create(target) ++ if err != nil { ++ return err ++ } ++ defer f.Close() ++ ++ if _, err := io.WriteString(f, filePrepender(target)); err != nil { ++ return err ++ } ++ return c.genYamlCustom(cmd, f) ++} ++ ++// genYamlCustom creates custom yaml output. ++// nolint: gocyclo ++func (c *Client) genYamlCustom(cmd *cobra.Command, w io.Writer) error { ++ const ( ++ // shortMaxWidth is the maximum width for the "Short" description before ++ // we force YAML to use multi-line syntax. The goal is to make the total ++ // width fit within 80 characters. This value is based on 80 characters ++ // minus the with of the field, colon, and whitespace ('short: '). ++ shortMaxWidth = 73 ++ ++ // longMaxWidth is the maximum width for the "Short" description before ++ // we force YAML to use multi-line syntax. The goal is to make the total ++ // width fit within 80 characters. This value is based on 80 characters ++ // minus the with of the field, colon, and whitespace ('long: '). 
++ longMaxWidth = 74 ++ ) ++ ++ // necessary to add inherited flags otherwise some ++ // fields are not properly declared like usage ++ cmd.Flags().AddFlagSet(cmd.InheritedFlags()) ++ ++ cliDoc := cmdDoc{ ++ Name: cmd.CommandPath(), ++ Aliases: strings.Join(getAliases(cmd), ", "), ++ Short: forceMultiLine(cmd.Short, shortMaxWidth), ++ Long: forceMultiLine(cmd.Long, longMaxWidth), ++ Example: cmd.Example, ++ Deprecated: len(cmd.Deprecated) > 0, ++ Hidden: cmd.Hidden, ++ } ++ ++ if len(cliDoc.Long) == 0 { ++ cliDoc.Long = cliDoc.Short ++ } ++ ++ if cmd.Runnable() { ++ cliDoc.Usage = cmd.UseLine() ++ } ++ ++ // check recursively to handle inherited annotations ++ for curr := cmd; curr != nil; curr = curr.Parent() { ++ if v, ok := curr.Annotations["version"]; ok && cliDoc.MinAPIVersion == "" { ++ cliDoc.MinAPIVersion = v ++ } ++ if _, ok := curr.Annotations["experimental"]; ok && !cliDoc.Experimental { ++ cliDoc.Experimental = true ++ } ++ if _, ok := curr.Annotations["experimentalCLI"]; ok && !cliDoc.ExperimentalCLI { ++ cliDoc.ExperimentalCLI = true ++ } ++ if _, ok := curr.Annotations["kubernetes"]; ok && !cliDoc.Kubernetes { ++ cliDoc.Kubernetes = true ++ } ++ if _, ok := curr.Annotations["swarm"]; ok && !cliDoc.Swarm { ++ cliDoc.Swarm = true ++ } ++ if o, ok := curr.Annotations["ostype"]; ok && cliDoc.OSType == "" { ++ cliDoc.OSType = o ++ } ++ if _, ok := cmd.Annotations[annotation.CodeDelimiter]; !ok { ++ if cd, cok := curr.Annotations[annotation.CodeDelimiter]; cok { ++ if cmd.Annotations == nil { ++ cmd.Annotations = map[string]string{} ++ } ++ cmd.Annotations[annotation.CodeDelimiter] = cd ++ } ++ } ++ } ++ ++ anchors := make(map[string]struct{}) ++ if a, ok := cmd.Annotations["anchors"]; ok && a != "" { ++ for _, anchor := range strings.Split(a, ",") { ++ anchors[anchor] = struct{}{} ++ } ++ } ++ ++ flags := cmd.NonInheritedFlags() ++ if flags.HasFlags() { ++ cliDoc.Options = genFlagResult(cmd, flags, anchors) ++ } ++ flags = cmd.InheritedFlags() ++ if flags.HasFlags() { ++ cliDoc.InheritedOptions = genFlagResult(cmd, flags, anchors) ++ } ++ ++ if hasSeeAlso(cmd) { ++ if cmd.HasParent() { ++ parent := cmd.Parent() ++ cliDoc.Pname = parent.CommandPath() ++ cliDoc.Plink = strings.Replace(cliDoc.Pname, " ", "_", -1) + ".yaml" ++ cmd.VisitParents(func(c *cobra.Command) { ++ if c.DisableAutoGenTag { ++ cmd.DisableAutoGenTag = c.DisableAutoGenTag ++ } ++ }) ++ } ++ ++ children := cmd.Commands() ++ sort.Sort(byName(children)) ++ ++ for _, child := range children { ++ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { ++ continue ++ } ++ cliDoc.Cname = append(cliDoc.Cname, cliDoc.Name+" "+child.Name()) ++ cliDoc.Clink = append(cliDoc.Clink, strings.Replace(cliDoc.Name+"_"+child.Name(), " ", "_", -1)+".yaml") ++ } ++ } ++ ++ final, err := yaml.Marshal(&cliDoc) ++ if err != nil { ++ fmt.Println(err) ++ os.Exit(1) ++ } ++ if _, err := fmt.Fprintln(w, string(final)); err != nil { ++ return err ++ } ++ return nil ++} ++ ++func genFlagResult(cmd *cobra.Command, flags *pflag.FlagSet, anchors map[string]struct{}) []cmdOption { ++ var ( ++ result []cmdOption ++ opt cmdOption ++ ) ++ ++ const ( ++ // shortMaxWidth is the maximum width for the "Short" description before ++ // we force YAML to use multi-line syntax. The goal is to make the total ++ // width fit within 80 characters. This value is based on 80 characters ++ // minus the with of the field, colon, and whitespace (' default_value: '). 
++ defaultValueMaxWidth = 64 ++ ++ // longMaxWidth is the maximum width for the "Short" description before ++ // we force YAML to use multi-line syntax. The goal is to make the total ++ // width fit within 80 characters. This value is based on 80 characters ++ // minus the with of the field, colon, and whitespace (' description: '). ++ descriptionMaxWidth = 66 ++ ) ++ ++ flags.VisitAll(func(flag *pflag.Flag) { ++ opt = cmdOption{ ++ Option: flag.Name, ++ ValueType: flag.Value.Type(), ++ Deprecated: len(flag.Deprecated) > 0, ++ Hidden: flag.Hidden, ++ } ++ ++ var defval string ++ if v, ok := flag.Annotations[annotation.DefaultValue]; ok && len(v) > 0 { ++ defval = v[0] ++ if cd, ok := flag.Annotations[annotation.CodeDelimiter]; ok { ++ defval = strings.ReplaceAll(defval, cd[0], "`") ++ } else if cd, ok := cmd.Annotations[annotation.CodeDelimiter]; ok { ++ defval = strings.ReplaceAll(defval, cd, "`") ++ } ++ } else { ++ defval = flag.DefValue ++ } ++ opt.DefaultValue = forceMultiLine(defval, defaultValueMaxWidth) ++ ++ usage := flag.Usage ++ if cd, ok := flag.Annotations[annotation.CodeDelimiter]; ok { ++ usage = strings.ReplaceAll(usage, cd[0], "`") ++ } else if cd, ok := cmd.Annotations[annotation.CodeDelimiter]; ok { ++ usage = strings.ReplaceAll(usage, cd, "`") ++ } ++ opt.Description = forceMultiLine(usage, descriptionMaxWidth) ++ ++ if v, ok := flag.Annotations[annotation.ExternalURL]; ok && len(v) > 0 { ++ opt.DetailsURL = strings.TrimPrefix(v[0], "https://docs.docker.com") ++ } else if _, ok = anchors[flag.Name]; ok { ++ opt.DetailsURL = "#" + flag.Name ++ } ++ ++ // Todo, when we mark a shorthand is deprecated, but specify an empty message. ++ // The flag.ShorthandDeprecated is empty as the shorthand is deprecated. ++ // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok. ++ if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 { ++ opt.Shorthand = flag.Shorthand ++ } ++ if _, ok := flag.Annotations["experimental"]; ok { ++ opt.Experimental = true ++ } ++ if _, ok := flag.Annotations["deprecated"]; ok { ++ opt.Deprecated = true ++ } ++ if v, ok := flag.Annotations["version"]; ok { ++ opt.MinAPIVersion = v[0] ++ } ++ if _, ok := flag.Annotations["experimentalCLI"]; ok { ++ opt.ExperimentalCLI = true ++ } ++ if _, ok := flag.Annotations["kubernetes"]; ok { ++ opt.Kubernetes = true ++ } ++ if _, ok := flag.Annotations["swarm"]; ok { ++ opt.Swarm = true ++ } ++ ++ // Note that the annotation can have multiple ostypes set, however, multiple ++ // values are currently not used (and unlikely will). ++ // ++ // To simplify usage of the os_type property in the YAML, and for consistency ++ // with the same property for commands, we're only using the first ostype that's set. ++ if ostypes, ok := flag.Annotations["ostype"]; ok && len(opt.OSType) == 0 && len(ostypes) > 0 { ++ opt.OSType = ostypes[0] ++ } ++ ++ result = append(result, opt) ++ }) ++ ++ return result ++} ++ ++// forceMultiLine appends a newline (\n) to strings that are longer than max ++// to force the yaml lib to use block notation (https://yaml.org/spec/1.2/spec.html#Block) ++// instead of a single-line string with newlines and tabs encoded("string\nline1\nline2"). ++// ++// This makes the generated YAML more readable, and easier to review changes. ++// max can be used to customize the width to keep the whole line < 80 chars. 
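++//
++// Illustration (editorial note, not upstream code; the exact output style is
++// up to gopkg.in/yaml.v3): with max=10, forceMultiLine("a fairly long
++// description", 10) returns the string with a trailing "\n", so yaml.v3
++// emits it in block form, e.g.
++//
++//   short: |
++//       a fairly long description
++//
++// while a string that already contains a newline is returned as-is.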
++func forceMultiLine(s string, max int) string { ++ s = strings.TrimSpace(s) ++ if len(s) > max && !strings.Contains(s, "\n") { ++ s = s + "\n" ++ } ++ return s ++} ++ ++// Small duplication for cobra utils ++func hasSeeAlso(cmd *cobra.Command) bool { ++ if cmd.HasParent() { ++ return true ++ } ++ for _, c := range cmd.Commands() { ++ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { ++ continue ++ } ++ return true ++ } ++ return false ++} ++ ++// loadLongDescription gets long descriptions and examples from markdown. ++func (c *Client) loadLongDescription(parentCmd *cobra.Command) error { ++ for _, cmd := range parentCmd.Commands() { ++ if cmd.HasSubCommands() { ++ if err := c.loadLongDescription(cmd); err != nil { ++ return err ++ } ++ } ++ name := cmd.CommandPath() ++ if i := strings.Index(name, " "); i >= 0 { ++ // remove root command / binary name ++ name = name[i+1:] ++ } ++ if name == "" { ++ continue ++ } ++ mdFile := strings.ReplaceAll(name, " ", "_") + ".md" ++ sourcePath := filepath.Join(c.source, mdFile) ++ content, err := os.ReadFile(sourcePath) ++ if os.IsNotExist(err) { ++ log.Printf("WARN: %s does not exist, skipping Markdown examples for YAML doc\n", mdFile) ++ continue ++ } ++ if err != nil { ++ return err ++ } ++ applyDescriptionAndExamples(cmd, string(content)) ++ } ++ return nil ++} ++ ++// applyDescriptionAndExamples fills in cmd.Long and cmd.Example with the ++// "Description" and "Examples" H2 sections in mdString (if present). ++func applyDescriptionAndExamples(cmd *cobra.Command, mdString string) { ++ sections := getSections(mdString) ++ var ( ++ anchors []string ++ md string ++ ) ++ if sections["description"] != "" { ++ md, anchors = cleanupMarkDown(sections["description"]) ++ cmd.Long = md ++ anchors = append(anchors, md) ++ } ++ if sections["examples"] != "" { ++ md, anchors = cleanupMarkDown(sections["examples"]) ++ cmd.Example = md ++ anchors = append(anchors, md) ++ } ++ if len(anchors) > 0 { ++ if cmd.Annotations == nil { ++ cmd.Annotations = make(map[string]string) ++ } ++ cmd.Annotations["anchors"] = strings.Join(anchors, ",") ++ } ++} ++ ++type byName []*cobra.Command ++ ++func (s byName) Len() int { return len(s) } ++func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } ++func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } +diff --git a/vendor/github.com/docker/cli-docs-tool/docker-bake.hcl b/vendor/github.com/docker/cli-docs-tool/docker-bake.hcl +new file mode 100644 +index 000000000000..4a5f44f83018 +--- /dev/null ++++ b/vendor/github.com/docker/cli-docs-tool/docker-bake.hcl +@@ -0,0 +1,51 @@ ++// Copyright 2021 cli-docs-tool authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
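++
++// Usage note (editorial addition; assumes the repository's Dockerfile defines
++// the stages named in the targets below): "docker buildx bake" builds the
++// default "test" group, while "docker buildx bake validate" runs the lint,
++// vendor-validate and license-validate targets in one shot.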
++ ++group "default" { ++ targets = ["test"] ++} ++ ++group "validate" { ++ targets = ["lint", "vendor-validate", "license-validate"] ++} ++ ++target "lint" { ++ target = "lint" ++ output = ["type=cacheonly"] ++} ++ ++target "vendor-validate" { ++ target = "vendor-validate" ++ output = ["type=cacheonly"] ++} ++ ++target "vendor-update" { ++ target = "vendor-update" ++ output = ["."] ++} ++ ++target "test" { ++ target = "test-coverage" ++ output = ["."] ++} ++ ++target "license-validate" { ++ target = "license-validate" ++ output = ["type=cacheonly"] ++} ++ ++target "license-update" { ++ target = "license-update" ++ output = ["."] ++} +diff --git a/vendor/github.com/docker/cli-docs-tool/markdown.go b/vendor/github.com/docker/cli-docs-tool/markdown.go +new file mode 100644 +index 000000000000..32849236ed9c +--- /dev/null ++++ b/vendor/github.com/docker/cli-docs-tool/markdown.go +@@ -0,0 +1,87 @@ ++// Copyright 2017 cli-docs-tool authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package clidocstool ++ ++import ( ++ "regexp" ++ "strings" ++ "unicode" ++) ++ ++var ( ++ // mdHeading matches MarkDown H1..h6 headings. Note that this regex may produce ++ // false positives for (e.g.) comments in code-blocks (# this is a comment), ++ // so should not be used as a generic regex for other purposes. ++ mdHeading = regexp.MustCompile(`^([#]{1,6})\s(.*)$`) ++ // htmlAnchor matches inline HTML anchors. This is intended to only match anchors ++ // for our use-case; DO NOT consider using this as a generic regex, or at least ++ // not before reading https://stackoverflow.com/a/1732454/1811501. ++ htmlAnchor = regexp.MustCompile(`\s*`) ++) ++ ++// getSections returns all H2 sections by title (lowercase) ++func getSections(mdString string) map[string]string { ++ parsedContent := strings.Split("\n"+mdString, "\n## ") ++ sections := make(map[string]string, len(parsedContent)) ++ for _, s := range parsedContent { ++ if strings.HasPrefix(s, "#") { ++ // not a H2 Section ++ continue ++ } ++ parts := strings.SplitN(s, "\n", 2) ++ if len(parts) == 2 { ++ sections[strings.ToLower(parts[0])] = parts[1] ++ } ++ } ++ return sections ++} ++ ++// cleanupMarkDown cleans up the MarkDown passed in mdString for inclusion in ++// YAML. It removes trailing whitespace and substitutes tabs for four spaces ++// to prevent YAML switching to use "compact" form; ("line1 \nline\t2\n") ++// which, although equivalent, is hard to read. 
++func cleanupMarkDown(mdString string) (md string, anchors []string) { ++ // remove leading/trailing whitespace, and replace tabs in the whole content ++ mdString = strings.TrimSpace(mdString) ++ mdString = strings.ReplaceAll(mdString, "\t", " ") ++ mdString = strings.ReplaceAll(mdString, "https://docs.docker.com", "") ++ ++ var id string ++ // replace trailing whitespace per line, and handle custom anchors ++ lines := strings.Split(mdString, "\n") ++ for i := 0; i < len(lines); i++ { ++ lines[i] = strings.TrimRightFunc(lines[i], unicode.IsSpace) ++ lines[i], id = convertHTMLAnchor(lines[i]) ++ if id != "" { ++ anchors = append(anchors, id) ++ } ++ } ++ return strings.Join(lines, "\n"), anchors ++} ++ ++// convertHTMLAnchor converts inline anchor-tags in headings () ++// to an extended-markdown property ({#myanchor}). Extended Markdown properties ++// are not supported in GitHub Flavored Markdown, but are supported by Jekyll, ++// and lead to cleaner HTML in our docs, and prevents duplicate anchors. ++// It returns the converted MarkDown heading and the custom ID (if present) ++func convertHTMLAnchor(mdLine string) (md string, customID string) { ++ if m := mdHeading.FindStringSubmatch(mdLine); len(m) > 0 { ++ if a := htmlAnchor.FindStringSubmatch(m[2]); len(a) > 0 { ++ customID = a[1] ++ mdLine = m[1] + " " + htmlAnchor.ReplaceAllString(m[2], "") + " {#" + customID + "}" ++ } ++ } ++ return mdLine, customID ++} +diff --git a/vendor/github.com/russross/blackfriday/v2/.gitignore b/vendor/github.com/russross/blackfriday/v2/.gitignore +new file mode 100644 +index 000000000000..75623dcccbb7 +--- /dev/null ++++ b/vendor/github.com/russross/blackfriday/v2/.gitignore +@@ -0,0 +1,8 @@ ++*.out ++*.swp ++*.8 ++*.6 ++_obj ++_test* ++markdown ++tags +diff --git a/vendor/github.com/russross/blackfriday/v2/.travis.yml b/vendor/github.com/russross/blackfriday/v2/.travis.yml +new file mode 100644 +index 000000000000..b0b525a5a8e1 +--- /dev/null ++++ b/vendor/github.com/russross/blackfriday/v2/.travis.yml +@@ -0,0 +1,17 @@ ++sudo: false ++language: go ++go: ++ - "1.10.x" ++ - "1.11.x" ++ - tip ++matrix: ++ fast_finish: true ++ allow_failures: ++ - go: tip ++install: ++ - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). ++script: ++ - go get -t -v ./... ++ - diff -u <(echo -n) <(gofmt -d -s .) ++ - go tool vet . ++ - go test -v ./... +diff --git a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt +new file mode 100644 +index 000000000000..2885af3602d8 +--- /dev/null ++++ b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt +@@ -0,0 +1,29 @@ ++Blackfriday is distributed under the Simplified BSD License: ++ ++> Copyright © 2011 Russ Ross ++> All rights reserved. ++> ++> Redistribution and use in source and binary forms, with or without ++> modification, are permitted provided that the following conditions ++> are met: ++> ++> 1. Redistributions of source code must retain the above copyright ++> notice, this list of conditions and the following disclaimer. ++> ++> 2. Redistributions in binary form must reproduce the above ++> copyright notice, this list of conditions and the following ++> disclaimer in the documentation and/or other materials provided with ++> the distribution. 
++> ++> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS ++> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ++> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, ++> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, ++> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ++> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ++> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++> POSSIBILITY OF SUCH DAMAGE. +diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md +new file mode 100644 +index 000000000000..d9c08a22fc54 +--- /dev/null ++++ b/vendor/github.com/russross/blackfriday/v2/README.md +@@ -0,0 +1,335 @@ ++Blackfriday ++[![Build Status][BuildV2SVG]][BuildV2URL] ++[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] ++=========== ++ ++Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It ++is paranoid about its input (so you can safely feed it user-supplied ++data), it is fast, it supports common extensions (tables, smart ++punctuation substitutions, etc.), and it is safe for all utf-8 ++(unicode) input. ++ ++HTML output is currently supported, along with Smartypants ++extensions. ++ ++It started as a translation from C of [Sundown][3]. ++ ++ ++Installation ++------------ ++ ++Blackfriday is compatible with modern Go releases in module mode. ++With Go installed: ++ ++ go get github.com/russross/blackfriday/v2 ++ ++will resolve and add the package to the current development module, ++then build and install it. Alternatively, you can achieve the same ++if you import it in a package: ++ ++ import "github.com/russross/blackfriday/v2" ++ ++and `go get` without parameters. ++ ++Legacy GOPATH mode is unsupported. ++ ++ ++Versions ++-------- ++ ++Currently maintained and recommended version of Blackfriday is `v2`. It's being ++developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the ++documentation is available at ++https://pkg.go.dev/github.com/russross/blackfriday/v2. ++ ++It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. ++ ++Version 2 offers a number of improvements over v1: ++ ++* Cleaned up API ++* A separate call to [`Parse`][4], which produces an abstract syntax tree for ++ the document ++* Latest bug fixes ++* Flexibility to easily add your own rendering extensions ++ ++Potential drawbacks: ++ ++* Our benchmarks show v2 to be slightly slower than v1. Currently in the ++ ballpark of around 15%. ++* API breakage. If you can't afford modifying your code to adhere to the new API ++ and don't care too much about the new features, v2 is probably not for you. ++* Several bug fixes are trailing behind and still need to be forward-ported to ++ v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for ++ tracking. ++ ++If you are still interested in the legacy `v1`, you can import it from ++`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found ++here: https://pkg.go.dev/github.com/russross/blackfriday. 
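++For instance, the separate `Parse` call mentioned above lets you produce the
++AST once and walk it yourself. A minimal sketch (an editorial example, not
++part of the upstream README; error handling and the renderer's
++RenderHeader/RenderFooter calls are omitted):
++
++```go
++import (
++    "os"
++
++    "github.com/russross/blackfriday/v2"
++)
++
++func render(input []byte) {
++    md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
++    ast := md.Parse(input)
++    r := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{})
++    ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
++        return r.RenderNode(os.Stdout, node, entering)
++    })
++}
++```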
++ ++ ++Usage ++----- ++ ++For the most sensible markdown processing, it is as simple as getting your input ++into a byte slice and calling: ++ ++```go ++output := blackfriday.Run(input) ++``` ++ ++Your input will be parsed and the output rendered with a set of most popular ++extensions enabled. If you want the most basic feature set, corresponding with ++the bare Markdown specification, use: ++ ++```go ++output := blackfriday.Run(input, blackfriday.WithNoExtensions()) ++``` ++ ++### Sanitize untrusted content ++ ++Blackfriday itself does nothing to protect against malicious content. If you are ++dealing with user-supplied markdown, we recommend running Blackfriday's output ++through HTML sanitizer such as [Bluemonday][5]. ++ ++Here's an example of simple usage of Blackfriday together with Bluemonday: ++ ++```go ++import ( ++ "github.com/microcosm-cc/bluemonday" ++ "github.com/russross/blackfriday/v2" ++) ++ ++// ... ++unsafe := blackfriday.Run(input) ++html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) ++``` ++ ++### Custom options ++ ++If you want to customize the set of options, use `blackfriday.WithExtensions`, ++`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. ++ ++### `blackfriday-tool` ++ ++You can also check out `blackfriday-tool` for a more complete example ++of how to use it. Download and install it using: ++ ++ go get github.com/russross/blackfriday-tool ++ ++This is a simple command-line tool that allows you to process a ++markdown file using a standalone program. You can also browse the ++source directly on github if you are just looking for some example ++code: ++ ++* <https://github.com/russross/blackfriday-tool> ++ ++Note that if you have not already done so, installing ++`blackfriday-tool` will be sufficient to download and install ++blackfriday in addition to the tool itself. The tool binary will be ++installed in `$GOPATH/bin`. This is a statically-linked binary that ++can be copied to wherever you need it without worrying about ++dependencies and library versions. ++ ++### Sanitized anchor names ++ ++Blackfriday includes an algorithm for creating sanitized anchor names ++corresponding to a given input text. This algorithm is used to create ++anchors for headings when `AutoHeadingIDs` extension is enabled. The ++algorithm has a specification, so that other packages can create ++compatible anchor names and links to those anchors. ++ ++The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names. ++ ++[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to ++create compatible links to the anchor names generated by blackfriday. ++This algorithm is also implemented in a small standalone package at ++[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients ++that want a small package and don't need full functionality of blackfriday. ++ ++ ++Features ++-------- ++ ++All features of Sundown are supported, including: ++ ++* **Compatibility**. The Markdown v1.0.3 test suite passes with ++ the `--tidy` option. Without `--tidy`, the differences are ++ mostly in whitespace and entity escaping, where blackfriday is ++ more consistent and cleaner. ++ ++* **Common extensions**, including table support, fenced code ++ blocks, autolinks, strikethroughs, non-strict emphasis, etc. ++ ++* **Safety**.
Blackfriday is paranoid when parsing, making it safe ++ to feed untrusted user input without fear of bad things ++ happening. The test suite stress tests this and there are no ++ known inputs that make it crash. If you find one, please let me ++ know and send me the input that does it. ++ ++ NOTE: "safety" in this context means *runtime safety only*. In order to ++ protect yourself against JavaScript injection in untrusted content, see ++ [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). ++ ++* **Fast processing**. It is fast enough to render on-demand in ++ most web applications without having to cache the output. ++ ++* **Thread safety**. You can run multiple parsers in different ++ goroutines without ill effect. There is no dependence on global ++ shared state. ++ ++* **Minimal dependencies**. Blackfriday only depends on standard ++ library packages in Go. The source code is pretty ++ self-contained, so it is easy to add to any project, including ++ Google App Engine projects. ++ ++* **Standards compliant**. Output successfully validates using the ++ W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. ++ ++ ++Extensions ++---------- ++ ++In addition to the standard markdown syntax, this package ++implements the following extensions: ++ ++* **Intra-word emphasis supression**. The `_` character is ++ commonly used inside words when discussing code, so having ++ markdown interpret it as an emphasis command is usually the ++ wrong thing. Blackfriday lets you treat all emphasis markers as ++ normal characters when they occur inside a word. ++ ++* **Tables**. Tables can be created by drawing them in the input ++ using a simple syntax: ++ ++ ``` ++ Name | Age ++ --------|------ ++ Bob | 27 ++ Alice | 23 ++ ``` ++ ++* **Fenced code blocks**. In addition to the normal 4-space ++ indentation to mark code blocks, you can explicitly mark them ++ and supply a language (to make syntax highlighting simple). Just ++ mark it like this: ++ ++ ```go ++ func getTrue() bool { ++ return true ++ } ++ ``` ++ ++ You can use 3 or more backticks to mark the beginning of the ++ block, and the same number to mark the end of the block. ++ ++ To preserve classes of fenced code blocks while using the bluemonday ++ HTML sanitizer, use the following policy: ++ ++ ```go ++ p := bluemonday.UGCPolicy() ++ p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") ++ html := p.SanitizeBytes(unsafe) ++ ``` ++ ++* **Definition lists**. A simple definition list is made of a single-line ++ term followed by a colon and the definition for that term. ++ ++ Cat ++ : Fluffy animal everyone likes ++ ++ Internet ++ : Vector of transmission for pictures of cats ++ ++ Terms must be separated from the previous definition by a blank line. ++ ++* **Footnotes**. A marker in the text that will become a superscript number; ++ a footnote definition that will be placed in a list of footnotes at the ++ end of the document. A footnote looks like this: ++ ++ This is a footnote.[^1] ++ ++ [^1]: the footnote text. ++ ++* **Autolinking**. Blackfriday can find URLs that have not been ++ explicitly marked as links and turn them into links. ++ ++* **Strikethrough**. Use two tildes (`~~`) to mark text that ++ should be crossed out. ++ ++* **Hard line breaks**. With this extension enabled newlines in the input ++ translate into line breaks in the output. This extension is off by default. ++ ++* **Smart quotes**. 
Smartypants-style punctuation substitution is ++ supported, turning normal double- and single-quote marks into ++ curly quotes, etc. ++ ++* **LaTeX-style dash parsing** is an additional option, where `--` ++ is translated into `–`, and `---` is translated into ++ `—`. This differs from most smartypants processors, which ++ turn a single hyphen into an ndash and a double hyphen into an ++ mdash. ++ ++* **Smart fractions**, where anything that looks like a fraction ++ is translated into suitable HTML (instead of just a few special ++ cases like most smartypant processors). For example, `4/5` ++ becomes `45`, which renders as ++ 45. ++ ++ ++Other renderers ++--------------- ++ ++Blackfriday is structured to allow alternative rendering engines. Here ++are a few of note: ++ ++* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown): ++ provides a GitHub Flavored Markdown renderer with fenced code block ++ highlighting, clickable heading anchor links. ++ ++ It's not customizable, and its goal is to produce HTML output ++ equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), ++ except the rendering is performed locally. ++ ++* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, ++ but for markdown. ++ ++* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex): ++ renders output as LaTeX. ++ ++* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience ++ integration with the [Chroma](https://github.com/alecthomas/chroma) code ++ highlighting library. bfchroma is only compatible with v2 of Blackfriday and ++ provides a drop-in renderer ready to use with Blackfriday, as well as ++ options and means for further customization. ++ ++* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer. ++ ++* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style ++ ++ ++TODO ++---- ++ ++* More unit testing ++* Improve Unicode support. It does not understand all Unicode ++ rules (about what constitutes a letter, a punctuation symbol, ++ etc.), so it may fail to detect word boundaries correctly in ++ some instances. It is safe on all UTF-8 input. ++ ++ ++License ++------- ++ ++[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) ++ ++ ++ [1]: https://daringfireball.net/projects/markdown/ "Markdown" ++ [2]: https://golang.org/ "Go Language" ++ [3]: https://github.com/vmg/sundown "Sundown" ++ [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func" ++ [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" ++ ++ [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2 ++ [BuildV2URL]: https://travis-ci.org/russross/blackfriday ++ [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2 ++ [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2 +diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go +new file mode 100644 +index 000000000000..dcd61e6e35bc +--- /dev/null ++++ b/vendor/github.com/russross/blackfriday/v2/block.go +@@ -0,0 +1,1612 @@ ++// ++// Blackfriday Markdown Processor ++// Available at http://github.com/russross/blackfriday ++// ++// Copyright © 2011 Russ Ross . 
++// Distributed under the Simplified BSD License. ++// See README.md for details. ++// ++ ++// ++// Functions to parse block-level elements. ++// ++ ++package blackfriday ++ ++import ( ++ "bytes" ++ "html" ++ "regexp" ++ "strings" ++ "unicode" ++) ++ ++const ( ++ charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});" ++ escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]" ++) ++ ++var ( ++ reBackslashOrAmp = regexp.MustCompile("[\\&]") ++ reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity) ++) ++ ++// Parse block-level data. ++// Note: this function and many that it calls assume that ++// the input buffer ends with a newline. ++func (p *Markdown) block(data []byte) { ++ // this is called recursively: enforce a maximum depth ++ if p.nesting >= p.maxNesting { ++ return ++ } ++ p.nesting++ ++ ++ // parse out one block-level construct at a time ++ for len(data) > 0 { ++ // prefixed heading: ++ // ++ // # Heading 1 ++ // ## Heading 2 ++ // ... ++ // ###### Heading 6 ++ if p.isPrefixHeading(data) { ++ data = data[p.prefixHeading(data):] ++ continue ++ } ++ ++ // block of preformatted HTML: ++ // ++ // <pre>
++ // ... ++ // </pre>
++ if data[0] == '<' { ++ if i := p.html(data, true); i > 0 { ++ data = data[i:] ++ continue ++ } ++ } ++ ++ // title block ++ // ++ // % stuff ++ // % more stuff ++ // % even more stuff ++ if p.extensions&Titleblock != 0 { ++ if data[0] == '%' { ++ if i := p.titleBlock(data, true); i > 0 { ++ data = data[i:] ++ continue ++ } ++ } ++ } ++ ++ // blank lines. note: returns the # of bytes to skip ++ if i := p.isEmpty(data); i > 0 { ++ data = data[i:] ++ continue ++ } ++ ++ // indented code block: ++ // ++ // func max(a, b int) int { ++ // if a > b { ++ // return a ++ // } ++ // return b ++ // } ++ if p.codePrefix(data) > 0 { ++ data = data[p.code(data):] ++ continue ++ } ++ ++ // fenced code block: ++ // ++ // ``` go ++ // func fact(n int) int { ++ // if n <= 1 { ++ // return n ++ // } ++ // return n * fact(n-1) ++ // } ++ // ``` ++ if p.extensions&FencedCode != 0 { ++ if i := p.fencedCodeBlock(data, true); i > 0 { ++ data = data[i:] ++ continue ++ } ++ } ++ ++ // horizontal rule: ++ // ++ // ------ ++ // or ++ // ****** ++ // or ++ // ______ ++ if p.isHRule(data) { ++ p.addBlock(HorizontalRule, nil) ++ var i int ++ for i = 0; i < len(data) && data[i] != '\n'; i++ { ++ } ++ data = data[i:] ++ continue ++ } ++ ++ // block quote: ++ // ++ // > A big quote I found somewhere ++ // > on the web ++ if p.quotePrefix(data) > 0 { ++ data = data[p.quote(data):] ++ continue ++ } ++ ++ // table: ++ // ++ // Name | Age | Phone ++ // ------|-----|--------- ++ // Bob | 31 | 555-1234 ++ // Alice | 27 | 555-4321 ++ if p.extensions&Tables != 0 { ++ if i := p.table(data); i > 0 { ++ data = data[i:] ++ continue ++ } ++ } ++ ++ // an itemized/unordered list: ++ // ++ // * Item 1 ++ // * Item 2 ++ // ++ // also works with + or - ++ if p.uliPrefix(data) > 0 { ++ data = data[p.list(data, 0):] ++ continue ++ } ++ ++ // a numbered/ordered list: ++ // ++ // 1. Item 1 ++ // 2. 
Item 2 ++ if p.oliPrefix(data) > 0 { ++ data = data[p.list(data, ListTypeOrdered):] ++ continue ++ } ++ ++ // definition lists: ++ // ++ // Term 1 ++ // : Definition a ++ // : Definition b ++ // ++ // Term 2 ++ // : Definition c ++ if p.extensions&DefinitionLists != 0 { ++ if p.dliPrefix(data) > 0 { ++ data = data[p.list(data, ListTypeDefinition):] ++ continue ++ } ++ } ++ ++ // anything else must look like a normal paragraph ++ // note: this finds underlined headings, too ++ data = data[p.paragraph(data):] ++ } ++ ++ p.nesting-- ++} ++ ++func (p *Markdown) addBlock(typ NodeType, content []byte) *Node { ++ p.closeUnmatchedBlocks() ++ container := p.addChild(typ, 0) ++ container.content = content ++ return container ++} ++ ++func (p *Markdown) isPrefixHeading(data []byte) bool { ++ if data[0] != '#' { ++ return false ++ } ++ ++ if p.extensions&SpaceHeadings != 0 { ++ level := 0 ++ for level < 6 && level < len(data) && data[level] == '#' { ++ level++ ++ } ++ if level == len(data) || data[level] != ' ' { ++ return false ++ } ++ } ++ return true ++} ++ ++func (p *Markdown) prefixHeading(data []byte) int { ++ level := 0 ++ for level < 6 && level < len(data) && data[level] == '#' { ++ level++ ++ } ++ i := skipChar(data, level, ' ') ++ end := skipUntilChar(data, i, '\n') ++ skip := end ++ id := "" ++ if p.extensions&HeadingIDs != 0 { ++ j, k := 0, 0 ++ // find start/end of heading id ++ for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { ++ } ++ for k = j + 1; k < end && data[k] != '}'; k++ { ++ } ++ // extract heading id iff found ++ if j < end && k < end { ++ id = string(data[j+2 : k]) ++ end = j ++ skip = k + 1 ++ for end > 0 && data[end-1] == ' ' { ++ end-- ++ } ++ } ++ } ++ for end > 0 && data[end-1] == '#' { ++ if isBackslashEscaped(data, end-1) { ++ break ++ } ++ end-- ++ } ++ for end > 0 && data[end-1] == ' ' { ++ end-- ++ } ++ if end > i { ++ if id == "" && p.extensions&AutoHeadingIDs != 0 { ++ id = SanitizedAnchorName(string(data[i:end])) ++ } ++ block := p.addBlock(Heading, data[i:end]) ++ block.HeadingID = id ++ block.Level = level ++ } ++ return skip ++} ++ ++func (p *Markdown) isUnderlinedHeading(data []byte) int { ++ // test of level 1 heading ++ if data[0] == '=' { ++ i := skipChar(data, 1, '=') ++ i = skipChar(data, i, ' ') ++ if i < len(data) && data[i] == '\n' { ++ return 1 ++ } ++ return 0 ++ } ++ ++ // test of level 2 heading ++ if data[0] == '-' { ++ i := skipChar(data, 1, '-') ++ i = skipChar(data, i, ' ') ++ if i < len(data) && data[i] == '\n' { ++ return 2 ++ } ++ return 0 ++ } ++ ++ return 0 ++} ++ ++func (p *Markdown) titleBlock(data []byte, doRender bool) int { ++ if data[0] != '%' { ++ return 0 ++ } ++ splitData := bytes.Split(data, []byte("\n")) ++ var i int ++ for idx, b := range splitData { ++ if !bytes.HasPrefix(b, []byte("%")) { ++ i = idx // - 1 ++ break ++ } ++ } ++ ++ data = bytes.Join(splitData[0:i], []byte("\n")) ++ consumed := len(data) ++ data = bytes.TrimPrefix(data, []byte("% ")) ++ data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) ++ block := p.addBlock(Heading, data) ++ block.Level = 1 ++ block.IsTitleblock = true ++ ++ return consumed ++} ++ ++func (p *Markdown) html(data []byte, doRender bool) int { ++ var i, j int ++ ++ // identify the opening tag ++ if data[0] != '<' { ++ return 0 ++ } ++ curtag, tagfound := p.htmlFindTag(data[1:]) ++ ++ // handle special cases ++ if !tagfound { ++ // check for an HTML comment ++ if size := p.htmlComment(data, doRender); size > 0 { ++ return size ++ } ++ ++ // check for an
<hr> tag ++ if size := p.htmlHr(data, doRender); size > 0 { ++ return size ++ } ++ ++ // no special case recognized ++ return 0 ++ } ++ ++ // look for an unindented matching closing tag ++ // followed by a blank line ++ found := false ++ /* ++ closetag := []byte("\n</" + curtag + ">") ++ j = len(curtag) + 1 ++ for !found { ++ // scan for a closing tag at the beginning of a line ++ if skip := bytes.Index(data[j:], closetag); skip >= 0 { ++ j += skip + len(closetag) ++ } else { ++ break ++ } ++ ++ // see if it is the only thing on the line ++ if skip := p.isEmpty(data[j:]); skip > 0 { ++ // see if it is followed by a blank line/eof ++ j += skip ++ if j >= len(data) { ++ found = true ++ i = j ++ } else { ++ if skip := p.isEmpty(data[j:]); skip > 0 { ++ j += skip ++ found = true ++ i = j ++ } ++ } ++ } ++ } ++ */ ++ ++ // if not found, try a second pass looking for indented match ++ // but not if tag is "ins" or "del" (following original Markdown.pl) ++ if !found && curtag != "ins" && curtag != "del" { ++ i = 1 ++ for i < len(data) { ++ i++ ++ for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { ++ i++ ++ } ++ ++ if i+2+len(curtag) >= len(data) { ++ break ++ } ++ ++ j = p.htmlFindEnd(curtag, data[i-1:]) ++ ++ if j > 0 { ++ i += j - 1 ++ found = true ++ break ++ } ++ } ++ } ++ ++ if !found { ++ return 0 ++ } ++ ++ // the end of the block has been found ++ if doRender { ++ // trim newlines ++ end := i ++ for end > 0 && data[end-1] == '\n' { ++ end-- ++ } ++ finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) ++ } ++ ++ return i ++} ++ ++func finalizeHTMLBlock(block *Node) { ++ block.Literal = block.content ++ block.content = nil ++} ++ ++// HTML comment, lax form ++func (p *Markdown) htmlComment(data []byte, doRender bool) int { ++ i := p.inlineHTMLComment(data) ++ // needs to end with a blank line ++ if j := p.isEmpty(data[i:]); j > 0 { ++ size := i + j ++ if doRender { ++ // trim trailing newlines ++ end := size ++ for end > 0 && data[end-1] == '\n' { ++ end-- ++ } ++ block := p.addBlock(HTMLBlock, data[:end]) ++ finalizeHTMLBlock(block) ++ } ++ return size ++ } ++ return 0 ++} ++ ++// HR, which is the only self-closing block tag considered ++func (p *Markdown) htmlHr(data []byte, doRender bool) int { ++ if len(data) < 4 { ++ return 0 ++ } ++ if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { ++ return 0 ++ } ++ if data[3] != ' ' && data[3] != '/' && data[3] != '>' { ++ // not an <hr>
tag after all; at least not a valid one ++ return 0 ++ } ++ i := 3 ++ for i < len(data) && data[i] != '>' && data[i] != '\n' { ++ i++ ++ } ++ if i < len(data) && data[i] == '>' { ++ i++ ++ if j := p.isEmpty(data[i:]); j > 0 { ++ size := i + j ++ if doRender { ++ // trim newlines ++ end := size ++ for end > 0 && data[end-1] == '\n' { ++ end-- ++ } ++ finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) ++ } ++ return size ++ } ++ } ++ return 0 ++} ++ ++func (p *Markdown) htmlFindTag(data []byte) (string, bool) { ++ i := 0 ++ for i < len(data) && isalnum(data[i]) { ++ i++ ++ } ++ key := string(data[:i]) ++ if _, ok := blockTags[key]; ok { ++ return key, true ++ } ++ return "", false ++} ++ ++func (p *Markdown) htmlFindEnd(tag string, data []byte) int { ++ // assume data[0] == '<' && data[1] == '/' already tested ++ if tag == "hr" { ++ return 2 ++ } ++ // check if tag is a match ++ closetag := []byte("</" + tag + ">") ++ if !bytes.HasPrefix(data, closetag) { ++ return 0 ++ } ++ i := len(closetag) ++ ++ // check that the rest of the line is blank ++ skip := 0 ++ if skip = p.isEmpty(data[i:]); skip == 0 { ++ return 0 ++ } ++ i += skip ++ skip = 0 ++ ++ if i >= len(data) { ++ return i ++ } ++ ++ if p.extensions&LaxHTMLBlocks != 0 { ++ return i ++ } ++ if skip = p.isEmpty(data[i:]); skip == 0 { ++ // following line must be blank ++ return 0 ++ } ++ ++ return i + skip ++} ++ ++func (*Markdown) isEmpty(data []byte) int { ++ // it is okay to call isEmpty on an empty buffer ++ if len(data) == 0 { ++ return 0 ++ } ++ ++ var i int ++ for i = 0; i < len(data) && data[i] != '\n'; i++ { ++ if data[i] != ' ' && data[i] != '\t' { ++ return 0 ++ } ++ } ++ if i < len(data) && data[i] == '\n' { ++ i++ ++ } ++ return i ++} ++ ++func (*Markdown) isHRule(data []byte) bool { ++ i := 0 ++ ++ // skip up to three spaces ++ for i < 3 && data[i] == ' ' { ++ i++ ++ } ++ ++ // look at the hrule char ++ if data[i] != '*' && data[i] != '-' && data[i] != '_' { ++ return false ++ } ++ c := data[i] ++ ++ // the whole line must be the char or whitespace ++ n := 0 ++ for i < len(data) && data[i] != '\n' { ++ switch { ++ case data[i] == c: ++ n++ ++ case data[i] != ' ': ++ return false ++ } ++ i++ ++ } ++ ++ return n >= 3 ++} ++ ++// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, ++// and returns the end index if so, or 0 otherwise. It also returns the marker found. ++// If info is not nil, it gets set to the syntax specified in the fence line. ++func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) { ++ i, size := 0, 0 ++ ++ // skip up to three spaces ++ for i < len(data) && i < 3 && data[i] == ' ' { ++ i++ ++ } ++ ++ // check for the marker characters: ~ or ` ++ if i >= len(data) { ++ return 0, "" ++ } ++ if data[i] != '~' && data[i] != '`' { ++ return 0, "" ++ } ++ ++ c := data[i] ++ ++ // the whole line must be the same char or whitespace ++ for i < len(data) && data[i] == c { ++ size++ ++ i++ ++ } ++ ++ // the marker char must occur at least 3 times ++ if size < 3 { ++ return 0, "" ++ } ++ marker = string(data[i-size : i]) ++ ++ // if this is the end marker, it must match the beginning marker ++ if oldmarker != "" && marker != oldmarker { ++ return 0, "" ++ } ++ ++ // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here ++ // into one, always get the info string, and discard it if the caller doesn't care.
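++ // Editorial example: for data beginning "``` go\n", this path returns end
++ // just past the newline, marker == "```", and (when info is non-nil)
++ // *info set to "go"; a line that is not a fence yields end == 0 above.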
++ if info != nil { ++ infoLength := 0 ++ i = skipChar(data, i, ' ') ++ ++ if i >= len(data) { ++ if i == len(data) { ++ return i, marker ++ } ++ return 0, "" ++ } ++ ++ infoStart := i ++ ++ if data[i] == '{' { ++ i++ ++ infoStart++ ++ ++ for i < len(data) && data[i] != '}' && data[i] != '\n' { ++ infoLength++ ++ i++ ++ } ++ ++ if i >= len(data) || data[i] != '}' { ++ return 0, "" ++ } ++ ++ // strip all whitespace at the beginning and the end ++ // of the {} block ++ for infoLength > 0 && isspace(data[infoStart]) { ++ infoStart++ ++ infoLength-- ++ } ++ ++ for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { ++ infoLength-- ++ } ++ i++ ++ i = skipChar(data, i, ' ') ++ } else { ++ for i < len(data) && !isverticalspace(data[i]) { ++ infoLength++ ++ i++ ++ } ++ } ++ ++ *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) ++ } ++ ++ if i == len(data) { ++ return i, marker ++ } ++ if i > len(data) || data[i] != '\n' { ++ return 0, "" ++ } ++ return i + 1, marker // Take newline into account. ++} ++ ++// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, ++// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. ++// If doRender is true, a final newline is mandatory to recognize the fenced code block. ++func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { ++ var info string ++ beg, marker := isFenceLine(data, &info, "") ++ if beg == 0 || beg >= len(data) { ++ return 0 ++ } ++ fenceLength := beg - 1 ++ ++ var work bytes.Buffer ++ work.Write([]byte(info)) ++ work.WriteByte('\n') ++ ++ for { ++ // safe to assume beg < len(data) ++ ++ // check for the end of the code block ++ fenceEnd, _ := isFenceLine(data[beg:], nil, marker) ++ if fenceEnd != 0 { ++ beg += fenceEnd ++ break ++ } ++ ++ // copy the current line ++ end := skipUntilChar(data, beg, '\n') + 1 ++ ++ // did we reach the end of the buffer without a closing marker? 
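++ // (editorial note: returning 0 for an unterminated fence makes the caller
++ // fall back to other block handlers, e.g. paragraph, instead of swallowing
++ // the rest of the buffer as code)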
++ if end >= len(data) { ++ return 0 ++ } ++ ++ // verbatim copy to the working buffer ++ if doRender { ++ work.Write(data[beg:end]) ++ } ++ beg = end ++ } ++ ++ if doRender { ++ block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer ++ block.IsFenced = true ++ block.FenceLength = fenceLength ++ finalizeCodeBlock(block) ++ } ++ ++ return beg ++} ++ ++func unescapeChar(str []byte) []byte { ++ if str[0] == '\\' { ++ return []byte{str[1]} ++ } ++ return []byte(html.UnescapeString(string(str))) ++} ++ ++func unescapeString(str []byte) []byte { ++ if reBackslashOrAmp.Match(str) { ++ return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar) ++ } ++ return str ++} ++ ++func finalizeCodeBlock(block *Node) { ++ if block.IsFenced { ++ newlinePos := bytes.IndexByte(block.content, '\n') ++ firstLine := block.content[:newlinePos] ++ rest := block.content[newlinePos+1:] ++ block.Info = unescapeString(bytes.Trim(firstLine, "\n")) ++ block.Literal = rest ++ } else { ++ block.Literal = block.content ++ } ++ block.content = nil ++} ++ ++func (p *Markdown) table(data []byte) int { ++ table := p.addBlock(Table, nil) ++ i, columns := p.tableHeader(data) ++ if i == 0 { ++ p.tip = table.Parent ++ table.Unlink() ++ return 0 ++ } ++ ++ p.addBlock(TableBody, nil) ++ ++ for i < len(data) { ++ pipes, rowStart := 0, i ++ for ; i < len(data) && data[i] != '\n'; i++ { ++ if data[i] == '|' { ++ pipes++ ++ } ++ } ++ ++ if pipes == 0 { ++ i = rowStart ++ break ++ } ++ ++ // include the newline in data sent to tableRow ++ if i < len(data) && data[i] == '\n' { ++ i++ ++ } ++ p.tableRow(data[rowStart:i], columns, false) ++ } ++ ++ return i ++} ++ ++// check if the specified position is preceded by an odd number of backslashes ++func isBackslashEscaped(data []byte, i int) bool { ++ backslashes := 0 ++ for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { ++ backslashes++ ++ } ++ return backslashes&1 == 1 ++} ++ ++func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) { ++ i := 0 ++ colCount := 1 ++ for i = 0; i < len(data) && data[i] != '\n'; i++ { ++ if data[i] == '|' && !isBackslashEscaped(data, i) { ++ colCount++ ++ } ++ } ++ ++ // doesn't look like a table header ++ if colCount == 1 { ++ return ++ } ++ ++ // include the newline in the data sent to tableRow ++ j := i ++ if j < len(data) && data[j] == '\n' { ++ j++ ++ } ++ header := data[:j] ++ ++ // column count ignores pipes at beginning or end of line ++ if data[0] == '|' { ++ colCount-- ++ } ++ if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { ++ colCount-- ++ } ++ ++ columns = make([]CellAlignFlags, colCount) ++ ++ // move on to the header underline ++ i++ ++ if i >= len(data) { ++ return ++ } ++ ++ if data[i] == '|' && !isBackslashEscaped(data, i) { ++ i++ ++ } ++ i = skipChar(data, i, ' ') ++ ++ // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 ++ // and trailing | optional on last column ++ col := 0 ++ for i < len(data) && data[i] != '\n' { ++ dashes := 0 ++ ++ if data[i] == ':' { ++ i++ ++ columns[col] |= TableAlignmentLeft ++ dashes++ ++ } ++ for i < len(data) && data[i] == '-' { ++ i++ ++ dashes++ ++ } ++ if i < len(data) && data[i] == ':' { ++ i++ ++ columns[col] |= TableAlignmentRight ++ dashes++ ++ } ++ for i < len(data) && data[i] == ' ' { ++ i++ ++ } ++ if i == len(data) { ++ return ++ } ++ // end of column test is messy ++ switch { ++ case dashes < 3: ++ // not a valid column ++ return ++ ++ case data[i] == '|' && !isBackslashEscaped(data, i): ++ // marker found, now skip past trailing whitespace ++ col++ ++ i++ ++ for i < len(data) && data[i] == ' ' { ++ i++ ++ } ++ ++ // trailing junk found after last column ++ if col >= colCount && i < len(data) && data[i] != '\n' { ++ return ++ } ++ ++ case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: ++ // something else found where marker was required ++ return ++ ++ case data[i] == '\n': ++ // marker is optional for the last column ++ col++ ++ ++ default: ++ // trailing junk found after last column ++ return ++ } ++ } ++ if col != colCount { ++ return ++ } ++ ++ p.addBlock(TableHead, nil) ++ p.tableRow(header, columns, true) ++ size = i ++ if size < len(data) && data[size] == '\n' { ++ size++ ++ } ++ return ++} ++ ++func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) { ++ p.addBlock(TableRow, nil) ++ i, col := 0, 0 ++ ++ if data[i] == '|' && !isBackslashEscaped(data, i) { ++ i++ ++ } ++ ++ for col = 0; col < len(columns) && i < len(data); col++ { ++ for i < len(data) && data[i] == ' ' { ++ i++ ++ } ++ ++ cellStart := i ++ ++ for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { ++ i++ ++ } ++ ++ cellEnd := i ++ ++ // skip the end-of-cell marker, possibly taking us past end of buffer ++ i++ ++ ++ for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' { ++ cellEnd-- ++ } ++ ++ cell := p.addBlock(TableCell, data[cellStart:cellEnd]) ++ cell.IsHeader = header ++ cell.Align = columns[col] ++ } ++ ++ // pad it out with empty columns to get the right number ++ for ; col < len(columns); col++ { ++ cell := p.addBlock(TableCell, nil) ++ cell.IsHeader = header ++ cell.Align = columns[col] ++ } ++ ++ // silently ignore rows with too many cells ++} ++ ++// returns blockquote prefix length ++func (p *Markdown) quotePrefix(data []byte) int { ++ i := 0 ++ for i < 3 && i < len(data) && data[i] == ' ' { ++ i++ ++ } ++ if i < len(data) && data[i] == '>' { ++ if i+1 < len(data) && data[i+1] == ' ' { ++ return i + 2 ++ } ++ return i + 1 ++ } ++ return 0 ++} ++ ++// blockquote ends with at least one blank line ++// followed by something without a blockquote prefix ++func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool { ++ if p.isEmpty(data[beg:]) <= 0 { ++ return false ++ } ++ if end >= len(data) { ++ return true ++ } ++ return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 ++} ++ ++// parse a blockquote fragment ++func (p *Markdown) quote(data []byte) int { ++ block := p.addBlock(BlockQuote, nil) ++ var raw bytes.Buffer ++ beg, end := 0, 0 ++ for beg < len(data) { ++ end = beg ++ // Step over whole lines, collecting them. 
While doing that, check for
++ // fenced code and if one's found, incorporate it altogether,
++ // regardless of any contents inside it
++ for end < len(data) && data[end] != '\n' {
++ if p.extensions&FencedCode != 0 {
++ if i := p.fencedCodeBlock(data[end:], false); i > 0 {
++ // -1 to compensate for the extra end++ after the loop:
++ end += i - 1
++ break
++ }
++ }
++ end++
++ }
++ if end < len(data) && data[end] == '\n' {
++ end++
++ }
++ if pre := p.quotePrefix(data[beg:]); pre > 0 {
++ // skip the prefix
++ beg += pre
++ } else if p.terminateBlockquote(data, beg, end) {
++ break
++ }
++ // this line is part of the blockquote
++ raw.Write(data[beg:end])
++ beg = end
++ }
++ p.block(raw.Bytes())
++ p.finalize(block)
++ return end
++}
++
++// returns prefix length for block code
++func (p *Markdown) codePrefix(data []byte) int {
++ if len(data) >= 1 && data[0] == '\t' {
++ return 1
++ }
++ if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
++ return 4
++ }
++ return 0
++}
++
++func (p *Markdown) code(data []byte) int {
++ var work bytes.Buffer
++
++ i := 0
++ for i < len(data) {
++ beg := i
++ for i < len(data) && data[i] != '\n' {
++ i++
++ }
++ if i < len(data) && data[i] == '\n' {
++ i++
++ }
++
++ blankline := p.isEmpty(data[beg:i]) > 0
++ if pre := p.codePrefix(data[beg:i]); pre > 0 {
++ beg += pre
++ } else if !blankline {
++ // non-empty, non-prefixed line breaks the pre
++ i = beg
++ break
++ }
++
++ // verbatim copy to the working buffer
++ if blankline {
++ work.WriteByte('\n')
++ } else {
++ work.Write(data[beg:i])
++ }
++ }
++
++ // trim all the \n off the end of work
++ workbytes := work.Bytes()
++ eol := len(workbytes)
++ for eol > 0 && workbytes[eol-1] == '\n' {
++ eol--
++ }
++ if eol != len(workbytes) {
++ work.Truncate(eol)
++ }
++
++ work.WriteByte('\n')
++
++ block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer
++ block.IsFenced = false
++ finalizeCodeBlock(block)
++
++ return i
++}
++
++// returns unordered list item prefix
++func (p *Markdown) uliPrefix(data []byte) int {
++ i := 0
++ // start with up to 3 spaces
++ for i < len(data) && i < 3 && data[i] == ' ' {
++ i++
++ }
++ if i >= len(data)-1 {
++ return 0
++ }
++ // need one of {'*', '+', '-'} followed by a space or a tab
++ if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
++ (data[i+1] != ' ' && data[i+1] != '\t') {
++ return 0
++ }
++ return i + 2
++}
++
++// returns ordered list item prefix
++func (p *Markdown) oliPrefix(data []byte) int {
++ i := 0
++
++ // start with up to 3 spaces
++ for i < 3 && i < len(data) && data[i] == ' ' {
++ i++
++ }
++
++ // count the digits
++ start := i
++ for i < len(data) && data[i] >= '0' && data[i] <= '9' {
++ i++
++ }
++ if start == i || i >= len(data)-1 {
++ return 0
++ }
++
++ // we need >= 1 digit followed by a dot and a space or a tab
++ if data[i] != '.'
|| !(data[i+1] == ' ' || data[i+1] == '\t') { ++ return 0 ++ } ++ return i + 2 ++} ++ ++// returns definition list item prefix ++func (p *Markdown) dliPrefix(data []byte) int { ++ if len(data) < 2 { ++ return 0 ++ } ++ i := 0 ++ // need a ':' followed by a space or a tab ++ if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') { ++ return 0 ++ } ++ for i < len(data) && data[i] == ' ' { ++ i++ ++ } ++ return i + 2 ++} ++ ++// parse ordered or unordered list block ++func (p *Markdown) list(data []byte, flags ListType) int { ++ i := 0 ++ flags |= ListItemBeginningOfList ++ block := p.addBlock(List, nil) ++ block.ListFlags = flags ++ block.Tight = true ++ ++ for i < len(data) { ++ skip := p.listItem(data[i:], &flags) ++ if flags&ListItemContainsBlock != 0 { ++ block.ListData.Tight = false ++ } ++ i += skip ++ if skip == 0 || flags&ListItemEndOfList != 0 { ++ break ++ } ++ flags &= ^ListItemBeginningOfList ++ } ++ ++ above := block.Parent ++ finalizeList(block) ++ p.tip = above ++ return i ++} ++ ++// Returns true if the list item is not the same type as its parent list ++func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool { ++ if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 { ++ return true ++ } else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 { ++ return true ++ } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) { ++ return true ++ } ++ return false ++} ++ ++// Returns true if block ends with a blank line, descending if needed ++// into lists and sublists. ++func endsWithBlankLine(block *Node) bool { ++ // TODO: figure this out. Always false now. ++ for block != nil { ++ //if block.lastLineBlank { ++ //return true ++ //} ++ t := block.Type ++ if t == List || t == Item { ++ block = block.LastChild ++ } else { ++ break ++ } ++ } ++ return false ++} ++ ++func finalizeList(block *Node) { ++ block.open = false ++ item := block.FirstChild ++ for item != nil { ++ // check for non-final list item ending with blank line: ++ if endsWithBlankLine(item) && item.Next != nil { ++ block.ListData.Tight = false ++ break ++ } ++ // recurse into children of list item, to see if there are spaces ++ // between any of them: ++ subItem := item.FirstChild ++ for subItem != nil { ++ if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) { ++ block.ListData.Tight = false ++ break ++ } ++ subItem = subItem.Next ++ } ++ item = item.Next ++ } ++} ++ ++// Parse a single list item. ++// Assumes initial prefix is already removed if this is a sublist. 
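++// It returns the number of bytes consumed, so the caller can advance past
++// the item; a return value of 0 means no list item starts here.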
++func (p *Markdown) listItem(data []byte, flags *ListType) int { ++ // keep track of the indentation of the first line ++ itemIndent := 0 ++ if data[0] == '\t' { ++ itemIndent += 4 ++ } else { ++ for itemIndent < 3 && data[itemIndent] == ' ' { ++ itemIndent++ ++ } ++ } ++ ++ var bulletChar byte = '*' ++ i := p.uliPrefix(data) ++ if i == 0 { ++ i = p.oliPrefix(data) ++ } else { ++ bulletChar = data[i-2] ++ } ++ if i == 0 { ++ i = p.dliPrefix(data) ++ // reset definition term flag ++ if i > 0 { ++ *flags &= ^ListTypeTerm ++ } ++ } ++ if i == 0 { ++ // if in definition list, set term flag and continue ++ if *flags&ListTypeDefinition != 0 { ++ *flags |= ListTypeTerm ++ } else { ++ return 0 ++ } ++ } ++ ++ // skip leading whitespace on first line ++ for i < len(data) && data[i] == ' ' { ++ i++ ++ } ++ ++ // find the end of the line ++ line := i ++ for i > 0 && i < len(data) && data[i-1] != '\n' { ++ i++ ++ } ++ ++ // get working buffer ++ var raw bytes.Buffer ++ ++ // put the first line into the working buffer ++ raw.Write(data[line:i]) ++ line = i ++ ++ // process the following lines ++ containsBlankLine := false ++ sublist := 0 ++ codeBlockMarker := "" ++ ++gatherlines: ++ for line < len(data) { ++ i++ ++ ++ // find the end of this line ++ for i < len(data) && data[i-1] != '\n' { ++ i++ ++ } ++ ++ // if it is an empty line, guess that it is part of this item ++ // and move on to the next line ++ if p.isEmpty(data[line:i]) > 0 { ++ containsBlankLine = true ++ line = i ++ continue ++ } ++ ++ // calculate the indentation ++ indent := 0 ++ indentIndex := 0 ++ if data[line] == '\t' { ++ indentIndex++ ++ indent += 4 ++ } else { ++ for indent < 4 && line+indent < i && data[line+indent] == ' ' { ++ indent++ ++ indentIndex++ ++ } ++ } ++ ++ chunk := data[line+indentIndex : i] ++ ++ if p.extensions&FencedCode != 0 { ++ // determine if in or out of codeblock ++ // if in codeblock, ignore normal list processing ++ _, marker := isFenceLine(chunk, nil, codeBlockMarker) ++ if marker != "" { ++ if codeBlockMarker == "" { ++ // start of codeblock ++ codeBlockMarker = marker ++ } else { ++ // end of codeblock. ++ codeBlockMarker = "" ++ } ++ } ++ // we are in a codeblock, write line, and continue ++ if codeBlockMarker != "" || marker != "" { ++ raw.Write(data[line+indentIndex : i]) ++ line = i ++ continue gatherlines ++ } ++ } ++ ++ // evaluate how this line fits in ++ switch { ++ // is this a nested list item? ++ case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || ++ p.oliPrefix(chunk) > 0 || ++ p.dliPrefix(chunk) > 0: ++ ++ // to be a nested list, it must be indented more ++ // if not, it is either a different kind of list ++ // or the next item in the same list ++ if indent <= itemIndent { ++ if p.listTypeChanged(chunk, flags) { ++ *flags |= ListItemEndOfList ++ } else if containsBlankLine { ++ *flags |= ListItemContainsBlock ++ } ++ ++ break gatherlines ++ } ++ ++ if containsBlankLine { ++ *flags |= ListItemContainsBlock ++ } ++ ++ // is this the first item in the nested list? ++ if sublist == 0 { ++ sublist = raw.Len() ++ } ++ ++ // is this a nested prefix heading? 
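++ // (an ATX-style "# heading" written inside the list item)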
++ case p.isPrefixHeading(chunk): ++ // if the heading is not indented, it is not nested in the list ++ // and thus ends the list ++ if containsBlankLine && indent < 4 { ++ *flags |= ListItemEndOfList ++ break gatherlines ++ } ++ *flags |= ListItemContainsBlock ++ ++ // anything following an empty line is only part ++ // of this item if it is indented 4 spaces ++ // (regardless of the indentation of the beginning of the item) ++ case containsBlankLine && indent < 4: ++ if *flags&ListTypeDefinition != 0 && i < len(data)-1 { ++ // is the next item still a part of this list? ++ next := i ++ for next < len(data) && data[next] != '\n' { ++ next++ ++ } ++ for next < len(data)-1 && data[next] == '\n' { ++ next++ ++ } ++ if i < len(data)-1 && data[i] != ':' && data[next] != ':' { ++ *flags |= ListItemEndOfList ++ } ++ } else { ++ *flags |= ListItemEndOfList ++ } ++ break gatherlines ++ ++ // a blank line means this should be parsed as a block ++ case containsBlankLine: ++ raw.WriteByte('\n') ++ *flags |= ListItemContainsBlock ++ } ++ ++ // if this line was preceded by one or more blanks, ++ // re-introduce the blank into the buffer ++ if containsBlankLine { ++ containsBlankLine = false ++ raw.WriteByte('\n') ++ } ++ ++ // add the line into the working buffer without prefix ++ raw.Write(data[line+indentIndex : i]) ++ ++ line = i ++ } ++ ++ rawBytes := raw.Bytes() ++ ++ block := p.addBlock(Item, nil) ++ block.ListFlags = *flags ++ block.Tight = false ++ block.BulletChar = bulletChar ++ block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark ++ ++ // render the contents of the list item ++ if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 { ++ // intermediate render of block item, except for definition term ++ if sublist > 0 { ++ p.block(rawBytes[:sublist]) ++ p.block(rawBytes[sublist:]) ++ } else { ++ p.block(rawBytes) ++ } ++ } else { ++ // intermediate render of inline item ++ if sublist > 0 { ++ child := p.addChild(Paragraph, 0) ++ child.content = rawBytes[:sublist] ++ p.block(rawBytes[sublist:]) ++ } else { ++ child := p.addChild(Paragraph, 0) ++ child.content = rawBytes ++ } ++ } ++ return line ++} ++ ++// render a single paragraph that has already been parsed out ++func (p *Markdown) renderParagraph(data []byte) { ++ if len(data) == 0 { ++ return ++ } ++ ++ // trim leading spaces ++ beg := 0 ++ for data[beg] == ' ' { ++ beg++ ++ } ++ ++ end := len(data) ++ // trim trailing newline ++ if data[len(data)-1] == '\n' { ++ end-- ++ } ++ ++ // trim trailing spaces ++ for end > beg && data[end-1] == ' ' { ++ end-- ++ } ++ ++ p.addBlock(Paragraph, data[beg:end]) ++} ++ ++func (p *Markdown) paragraph(data []byte) int { ++ // prev: index of 1st char of previous line ++ // line: index of 1st char of current line ++ // i: index of cursor/end of current line ++ var prev, line, i int ++ tabSize := TabSizeDefault ++ if p.extensions&TabSizeEight != 0 { ++ tabSize = TabSizeDouble ++ } ++ // keep going until we find something to mark the end of the paragraph ++ for i < len(data) { ++ // mark the beginning of the current line ++ prev = line ++ current := data[i:] ++ line = i ++ ++ // did we find a reference or a footnote? If so, end a paragraph ++ // preceding it and report that we have consumed up to the end of that ++ // reference: ++ if refEnd := isReference(p, current, tabSize); refEnd > 0 { ++ p.renderParagraph(data[:i]) ++ return i + refEnd ++ } ++ ++ // did we find a blank line marking the end of the paragraph? 
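++ // (isEmpty returns the length of the blank line in bytes, so i + n below
++ // is the total number of bytes consumed, including the blank line)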
++ if n := p.isEmpty(current); n > 0 {
++ // is this blank line followed by a definition list item?
++ if p.extensions&DefinitionLists != 0 {
++ if i < len(data)-1 && data[i+1] == ':' {
++ return p.list(data[prev:], ListTypeDefinition)
++ }
++ }
++
++ p.renderParagraph(data[:i])
++ return i + n
++ }
++
++ // an underline under some text marks a heading, so our paragraph ended on prev line
++ if i > 0 {
++ if level := p.isUnderlinedHeading(current); level > 0 {
++ // render the paragraph
++ p.renderParagraph(data[:prev])
++
++ // ignore leading and trailing whitespace
++ eol := i - 1
++ for prev < eol && data[prev] == ' ' {
++ prev++
++ }
++ for eol > prev && data[eol-1] == ' ' {
++ eol--
++ }
++
++ id := ""
++ if p.extensions&AutoHeadingIDs != 0 {
++ id = SanitizedAnchorName(string(data[prev:eol]))
++ }
++
++ block := p.addBlock(Heading, data[prev:eol])
++ block.Level = level
++ block.HeadingID = id
++
++ // find the end of the underline
++ for i < len(data) && data[i] != '\n' {
++ i++
++ }
++ return i
++ }
++ }
++
++ // if the next line starts a block of HTML, then the paragraph ends here
++ if p.extensions&LaxHTMLBlocks != 0 {
++ if data[i] == '<' && p.html(current, false) > 0 {
++ // rewind to before the HTML block
++ p.renderParagraph(data[:i])
++ return i
++ }
++ }
++
++ // if there's a prefixed heading or a horizontal rule after this, paragraph is over
++ if p.isPrefixHeading(current) || p.isHRule(current) {
++ p.renderParagraph(data[:i])
++ return i
++ }
++
++ // if there's a fenced code block, paragraph is over
++ if p.extensions&FencedCode != 0 {
++ if p.fencedCodeBlock(current, false) > 0 {
++ p.renderParagraph(data[:i])
++ return i
++ }
++ }
++
++ // if there's a definition list item, prev line is a definition term
++ if p.extensions&DefinitionLists != 0 {
++ if p.dliPrefix(current) != 0 {
++ ret := p.list(data[prev:], ListTypeDefinition)
++ return ret
++ }
++ }
++
++ // if there's a list after this, paragraph is over
++ if p.extensions&NoEmptyLineBeforeBlock != 0 {
++ if p.uliPrefix(current) != 0 ||
++ p.oliPrefix(current) != 0 ||
++ p.quotePrefix(current) != 0 ||
++ p.codePrefix(current) != 0 {
++ p.renderParagraph(data[:i])
++ return i
++ }
++ }
++
++ // otherwise, scan to the beginning of the next line
++ nl := bytes.IndexByte(data[i:], '\n')
++ if nl >= 0 {
++ i += nl + 1
++ } else {
++ i += len(data[i:])
++ }
++ }
++
++ p.renderParagraph(data[:i])
++ return i
++}
++
++func skipChar(data []byte, start int, char byte) int {
++ i := start
++ for i < len(data) && data[i] == char {
++ i++
++ }
++ return i
++}
++
++func skipUntilChar(text []byte, start int, char byte) int {
++ i := start
++ for i < len(text) && text[i] != char {
++ i++
++ }
++ return i
++}
++
++// SanitizedAnchorName returns a sanitized anchor name for the given text.
++//
++// It implements the algorithm specified in the package comment.
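++//
++// For example, SanitizedAnchorName("Hello, World!") returns "hello-world".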
++func SanitizedAnchorName(text string) string {
++ var anchorName []rune
++ futureDash := false
++ for _, r := range text {
++ switch {
++ case unicode.IsLetter(r) || unicode.IsNumber(r):
++ if futureDash && len(anchorName) > 0 {
++ anchorName = append(anchorName, '-')
++ }
++ futureDash = false
++ anchorName = append(anchorName, unicode.ToLower(r))
++ default:
++ futureDash = true
++ }
++ }
++ return string(anchorName)
++}
+diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go
+new file mode 100644
+index 000000000000..57ff152a0568
+--- /dev/null
++++ b/vendor/github.com/russross/blackfriday/v2/doc.go
+@@ -0,0 +1,46 @@
++// Package blackfriday is a markdown processor.
++//
++// It translates plain text with simple formatting rules into an AST, which can
++// then be further processed to HTML (provided by Blackfriday itself) or other
++// formats (provided by the community).
++//
++// The simplest way to invoke Blackfriday is to call the Run function. It will
++// take a text input and produce a text output in HTML (or other format).
++//
++// A slightly more sophisticated way to use Blackfriday is to create a Markdown
++// processor and to call Parse, which returns a syntax tree for the input
++// document. You can leverage Blackfriday's parsing for content extraction from
++// markdown documents. You can assign a custom renderer and set various options
++// to the Markdown processor.
++//
++// If you're interested in calling Blackfriday from the command line, see
++// https://github.com/russross/blackfriday-tool.
++//
++// Sanitized Anchor Names
++//
++// Blackfriday includes an algorithm for creating sanitized anchor names
++// corresponding to a given input text. This algorithm is used to create
++// anchors for headings when the AutoHeadingIDs extension is enabled. The
++// algorithm is specified below, so that other packages can create
++// compatible anchor names and links to those anchors.
++//
++// The algorithm iterates over the input text, interpreted as UTF-8,
++// one Unicode code point (rune) at a time. All runes that are letters (category L)
++// or numbers (category N) are considered valid characters. They are mapped to
++// lower case, and included in the output. All other runes are considered
++// invalid characters. Invalid characters that precede the first valid character,
++// as well as invalid characters that follow the last valid character,
++// are dropped completely. All other sequences of invalid characters
++// between two valid characters are replaced with a single dash character '-'.
++//
++// SanitizedAnchorName exposes this functionality, and can be used to
++// create compatible links to the anchor names generated by blackfriday.
++// This algorithm is also implemented in a small standalone package at
++// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
++// that want a small package and don't need the full functionality of blackfriday.
++package blackfriday
++
++// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
++// github.com/shurcooL/sanitized_anchor_name.
++// Otherwise, users of sanitized_anchor_name will get anchor names
++// that are incompatible with those generated by blackfriday.
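++
++// As a rough sketch of the two entry points described in the package comment
++// (Run with default options, and SanitizedAnchorName):
++//
++//	output := blackfriday.Run(input) // []byte of Markdown in, []byte of HTML out
++//	id := blackfriday.SanitizedAnchorName("Some Heading") // "some-heading"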
+diff --git a/vendor/github.com/russross/blackfriday/v2/entities.go b/vendor/github.com/russross/blackfriday/v2/entities.go +new file mode 100644 +index 000000000000..a2c3edb691c8 +--- /dev/null ++++ b/vendor/github.com/russross/blackfriday/v2/entities.go +@@ -0,0 +1,2236 @@ ++package blackfriday ++ ++// Extracted from https://html.spec.whatwg.org/multipage/entities.json ++var entities = map[string]bool{ ++ "Æ": true, ++ "Æ": true, ++ "&": true, ++ "&": true, ++ "Á": true, ++ "Á": true, ++ "Ă": true, ++ "Â": true, ++ "Â": true, ++ "А": true, ++ "𝔄": true, ++ "À": true, ++ "À": true, ++ "Α": true, ++ "Ā": true, ++ "⩓": true, ++ "Ą": true, ++ "𝔸": true, ++ "⁡": true, ++ "Å": true, ++ "Å": true, ++ "𝒜": true, ++ "≔": true, ++ "Ã": true, ++ "Ã": true, ++ "Ä": true, ++ "Ä": true, ++ "∖": true, ++ "⫧": true, ++ "⌆": true, ++ "Б": true, ++ "∵": true, ++ "ℬ": true, ++ "Β": true, ++ "𝔅": true, ++ "𝔹": true, ++ "˘": true, ++ "ℬ": true, ++ "≎": true, ++ "Ч": true, ++ "©": true, ++ "©": true, ++ "Ć": true, ++ "⋒": true, ++ "ⅅ": true, ++ "ℭ": true, ++ "Č": true, ++ "Ç": true, ++ "Ç": true, ++ "Ĉ": true, ++ "∰": true, ++ "Ċ": true, ++ "¸": true, ++ "·": true, ++ "ℭ": true, ++ "Χ": true, ++ "⊙": true, ++ "⊖": true, ++ "⊕": true, ++ "⊗": true, ++ "∲": true, ++ "”": true, ++ "’": true, ++ "∷": true, ++ "⩴": true, ++ "≡": true, ++ "∯": true, ++ "∮": true, ++ "ℂ": true, ++ "∐": true, ++ "∳": true, ++ "⨯": true, ++ "𝒞": true, ++ "⋓": true, ++ "≍": true, ++ "ⅅ": true, ++ "⤑": true, ++ "Ђ": true, ++ "Ѕ": true, ++ "Џ": true, ++ "‡": true, ++ "↡": true, ++ "⫤": true, ++ "Ď": true, ++ "Д": true, ++ "∇": true, ++ "Δ": true, ++ "𝔇": true, ++ "´": true, ++ "˙": true, ++ "˝": true, ++ "`": true, ++ "˜": true, ++ "⋄": true, ++ "ⅆ": true, ++ "𝔻": true, ++ "¨": true, ++ "⃜": true, ++ "≐": true, ++ "∯": true, ++ "¨": true, ++ "⇓": true, ++ "⇐": true, ++ "⇔": true, ++ "⫤": true, ++ "⟸": true, ++ "⟺": true, ++ "⟹": true, ++ "⇒": true, ++ "⊨": true, ++ "⇑": true, ++ "⇕": true, ++ "∥": true, ++ "↓": true, ++ "⤓": true, ++ "⇵": true, ++ "̑": true, ++ "⥐": true, ++ "⥞": true, ++ "↽": true, ++ "⥖": true, ++ "⥟": true, ++ "⇁": true, ++ "⥗": true, ++ "⊤": true, ++ "↧": true, ++ "⇓": true, ++ "𝒟": true, ++ "Đ": true, ++ "Ŋ": true, ++ "Ð": true, ++ "Ð": true, ++ "É": true, ++ "É": true, ++ "Ě": true, ++ "Ê": true, ++ "Ê": true, ++ "Э": true, ++ "Ė": true, ++ "𝔈": true, ++ "È": true, ++ "È": true, ++ "∈": true, ++ "Ē": true, ++ "◻": true, ++ "▫": true, ++ "Ę": true, ++ "𝔼": true, ++ "Ε": true, ++ "⩵": true, ++ "≂": true, ++ "⇌": true, ++ "ℰ": true, ++ "⩳": true, ++ "Η": true, ++ "Ë": true, ++ "Ë": true, ++ "∃": true, ++ "ⅇ": true, ++ "Ф": true, ++ "𝔉": true, ++ "◼": true, ++ "▪": true, ++ "𝔽": true, ++ "∀": true, ++ "ℱ": true, ++ "ℱ": true, ++ "Ѓ": true, ++ ">": true, ++ ">": true, ++ "Γ": true, ++ "Ϝ": true, ++ "Ğ": true, ++ "Ģ": true, ++ "Ĝ": true, ++ "Г": true, ++ "Ġ": true, ++ "𝔊": true, ++ "⋙": true, ++ "𝔾": true, ++ "≥": true, ++ "⋛": true, ++ "≧": true, ++ "⪢": true, ++ "≷": true, ++ "⩾": true, ++ "≳": true, ++ "𝒢": true, ++ "≫": true, ++ "Ъ": true, ++ "ˇ": true, ++ "^": true, ++ "Ĥ": true, ++ "ℌ": true, ++ "ℋ": true, ++ "ℍ": true, ++ "─": true, ++ "ℋ": true, ++ "Ħ": true, ++ "≎": true, ++ "≏": true, ++ "Е": true, ++ "IJ": true, ++ "Ё": true, ++ "Í": true, ++ "Í": true, ++ "Î": true, ++ "Î": true, ++ "И": true, ++ "İ": true, ++ "ℑ": true, ++ "Ì": true, ++ "Ì": true, ++ "ℑ": true, ++ "Ī": true, ++ "ⅈ": true, ++ "⇒": true, ++ "∬": true, ++ "∫": true, ++ "⋂": true, ++ "⁣": true, ++ "⁢": true, ++ "Į": true, ++ "𝕀": true, ++ "Ι": 
true, ++ "ℐ": true, ++ "Ĩ": true, ++ "І": true, ++ "Ï": true, ++ "Ï": true, ++ "Ĵ": true, ++ "Й": true, ++ "𝔍": true, ++ "𝕁": true, ++ "𝒥": true, ++ "Ј": true, ++ "Є": true, ++ "Х": true, ++ "Ќ": true, ++ "Κ": true, ++ "Ķ": true, ++ "К": true, ++ "𝔎": true, ++ "𝕂": true, ++ "𝒦": true, ++ "Љ": true, ++ "<": true, ++ "<": true, ++ "Ĺ": true, ++ "Λ": true, ++ "⟪": true, ++ "ℒ": true, ++ "↞": true, ++ "Ľ": true, ++ "Ļ": true, ++ "Л": true, ++ "⟨": true, ++ "←": true, ++ "⇤": true, ++ "⇆": true, ++ "⌈": true, ++ "⟦": true, ++ "⥡": true, ++ "⇃": true, ++ "⥙": true, ++ "⌊": true, ++ "↔": true, ++ "⥎": true, ++ "⊣": true, ++ "↤": true, ++ "⥚": true, ++ "⊲": true, ++ "⧏": true, ++ "⊴": true, ++ "⥑": true, ++ "⥠": true, ++ "↿": true, ++ "⥘": true, ++ "↼": true, ++ "⥒": true, ++ "⇐": true, ++ "⇔": true, ++ "⋚": true, ++ "≦": true, ++ "≶": true, ++ "⪡": true, ++ "⩽": true, ++ "≲": true, ++ "𝔏": true, ++ "⋘": true, ++ "⇚": true, ++ "Ŀ": true, ++ "⟵": true, ++ "⟷": true, ++ "⟶": true, ++ "⟸": true, ++ "⟺": true, ++ "⟹": true, ++ "𝕃": true, ++ "↙": true, ++ "↘": true, ++ "ℒ": true, ++ "↰": true, ++ "Ł": true, ++ "≪": true, ++ "⤅": true, ++ "М": true, ++ " ": true, ++ "ℳ": true, ++ "𝔐": true, ++ "∓": true, ++ "𝕄": true, ++ "ℳ": true, ++ "Μ": true, ++ "Њ": true, ++ "Ń": true, ++ "Ň": true, ++ "Ņ": true, ++ "Н": true, ++ "​": true, ++ "​": true, ++ "​": true, ++ "​": true, ++ "≫": true, ++ "≪": true, ++ " ": true, ++ "𝔑": true, ++ "⁠": true, ++ " ": true, ++ "ℕ": true, ++ "⫬": true, ++ "≢": true, ++ "≭": true, ++ "∦": true, ++ "∉": true, ++ "≠": true, ++ "≂̸": true, ++ "∄": true, ++ "≯": true, ++ "≱": true, ++ "≧̸": true, ++ "≫̸": true, ++ "≹": true, ++ "⩾̸": true, ++ "≵": true, ++ "≎̸": true, ++ "≏̸": true, ++ "⋪": true, ++ "⧏̸": true, ++ "⋬": true, ++ "≮": true, ++ "≰": true, ++ "≸": true, ++ "≪̸": true, ++ "⩽̸": true, ++ "≴": true, ++ "⪢̸": true, ++ "⪡̸": true, ++ "⊀": true, ++ "⪯̸": true, ++ "⋠": true, ++ "∌": true, ++ "⋫": true, ++ "⧐̸": true, ++ "⋭": true, ++ "⊏̸": true, ++ "⋢": true, ++ "⊐̸": true, ++ "⋣": true, ++ "⊂⃒": true, ++ "⊈": true, ++ "⊁": true, ++ "⪰̸": true, ++ "⋡": true, ++ "≿̸": true, ++ "⊃⃒": true, ++ "⊉": true, ++ "≁": true, ++ "≄": true, ++ "≇": true, ++ "≉": true, ++ "∤": true, ++ "𝒩": true, ++ "Ñ": true, ++ "Ñ": true, ++ "Ν": true, ++ "Œ": true, ++ "Ó": true, ++ "Ó": true, ++ "Ô": true, ++ "Ô": true, ++ "О": true, ++ "Ő": true, ++ "𝔒": true, ++ "Ò": true, ++ "Ò": true, ++ "Ō": true, ++ "Ω": true, ++ "Ο": true, ++ "𝕆": true, ++ "“": true, ++ "‘": true, ++ "⩔": true, ++ "𝒪": true, ++ "Ø": true, ++ "Ø": true, ++ "Õ": true, ++ "Õ": true, ++ "⨷": true, ++ "Ö": true, ++ "Ö": true, ++ "‾": true, ++ "⏞": true, ++ "⎴": true, ++ "⏜": true, ++ "∂": true, ++ "П": true, ++ "𝔓": true, ++ "Φ": true, ++ "Π": true, ++ "±": true, ++ "ℌ": true, ++ "ℙ": true, ++ "⪻": true, ++ "≺": true, ++ "⪯": true, ++ "≼": true, ++ "≾": true, ++ "″": true, ++ "∏": true, ++ "∷": true, ++ "∝": true, ++ "𝒫": true, ++ "Ψ": true, ++ """: true, ++ """: true, ++ "𝔔": true, ++ "ℚ": true, ++ "𝒬": true, ++ "⤐": true, ++ "®": true, ++ "®": true, ++ "Ŕ": true, ++ "⟫": true, ++ "↠": true, ++ "⤖": true, ++ "Ř": true, ++ "Ŗ": true, ++ "Р": true, ++ "ℜ": true, ++ "∋": true, ++ "⇋": true, ++ "⥯": true, ++ "ℜ": true, ++ "Ρ": true, ++ "⟩": true, ++ "→": true, ++ "⇥": true, ++ "⇄": true, ++ "⌉": true, ++ "⟧": true, ++ "⥝": true, ++ "⇂": true, ++ "⥕": true, ++ "⌋": true, ++ "⊢": true, ++ "↦": true, ++ "⥛": true, ++ "⊳": true, ++ "⧐": true, ++ "⊵": true, ++ "⥏": true, ++ "⥜": true, ++ "↾": true, ++ "⥔": true, ++ "⇀": true, ++ "⥓": true, 
++ "⇒": true, ++ "ℝ": true, ++ "⥰": true, ++ "⇛": true, ++ "ℛ": true, ++ "↱": true, ++ "⧴": true, ++ "Щ": true, ++ "Ш": true, ++ "Ь": true, ++ "Ś": true, ++ "⪼": true, ++ "Š": true, ++ "Ş": true, ++ "Ŝ": true, ++ "С": true, ++ "𝔖": true, ++ "↓": true, ++ "←": true, ++ "→": true, ++ "↑": true, ++ "Σ": true, ++ "∘": true, ++ "𝕊": true, ++ "√": true, ++ "□": true, ++ "⊓": true, ++ "⊏": true, ++ "⊑": true, ++ "⊐": true, ++ "⊒": true, ++ "⊔": true, ++ "𝒮": true, ++ "⋆": true, ++ "⋐": true, ++ "⋐": true, ++ "⊆": true, ++ "≻": true, ++ "⪰": true, ++ "≽": true, ++ "≿": true, ++ "∋": true, ++ "∑": true, ++ "⋑": true, ++ "⊃": true, ++ "⊇": true, ++ "⋑": true, ++ "Þ": true, ++ "Þ": true, ++ "™": true, ++ "Ћ": true, ++ "Ц": true, ++ " ": true, ++ "Τ": true, ++ "Ť": true, ++ "Ţ": true, ++ "Т": true, ++ "𝔗": true, ++ "∴": true, ++ "Θ": true, ++ "  ": true, ++ " ": true, ++ "∼": true, ++ "≃": true, ++ "≅": true, ++ "≈": true, ++ "𝕋": true, ++ "⃛": true, ++ "𝒯": true, ++ "Ŧ": true, ++ "Ú": true, ++ "Ú": true, ++ "↟": true, ++ "⥉": true, ++ "Ў": true, ++ "Ŭ": true, ++ "Û": true, ++ "Û": true, ++ "У": true, ++ "Ű": true, ++ "𝔘": true, ++ "Ù": true, ++ "Ù": true, ++ "Ū": true, ++ "_": true, ++ "⏟": true, ++ "⎵": true, ++ "⏝": true, ++ "⋃": true, ++ "⊎": true, ++ "Ų": true, ++ "𝕌": true, ++ "↑": true, ++ "⤒": true, ++ "⇅": true, ++ "↕": true, ++ "⥮": true, ++ "⊥": true, ++ "↥": true, ++ "⇑": true, ++ "⇕": true, ++ "↖": true, ++ "↗": true, ++ "ϒ": true, ++ "Υ": true, ++ "Ů": true, ++ "𝒰": true, ++ "Ũ": true, ++ "Ü": true, ++ "Ü": true, ++ "⊫": true, ++ "⫫": true, ++ "В": true, ++ "⊩": true, ++ "⫦": true, ++ "⋁": true, ++ "‖": true, ++ "‖": true, ++ "∣": true, ++ "|": true, ++ "❘": true, ++ "≀": true, ++ " ": true, ++ "𝔙": true, ++ "𝕍": true, ++ "𝒱": true, ++ "⊪": true, ++ "Ŵ": true, ++ "⋀": true, ++ "𝔚": true, ++ "𝕎": true, ++ "𝒲": true, ++ "𝔛": true, ++ "Ξ": true, ++ "𝕏": true, ++ "𝒳": true, ++ "Я": true, ++ "Ї": true, ++ "Ю": true, ++ "Ý": true, ++ "Ý": true, ++ "Ŷ": true, ++ "Ы": true, ++ "𝔜": true, ++ "𝕐": true, ++ "𝒴": true, ++ "Ÿ": true, ++ "Ж": true, ++ "Ź": true, ++ "Ž": true, ++ "З": true, ++ "Ż": true, ++ "​": true, ++ "Ζ": true, ++ "ℨ": true, ++ "ℤ": true, ++ "𝒵": true, ++ "á": true, ++ "á": true, ++ "ă": true, ++ "∾": true, ++ "∾̳": true, ++ "∿": true, ++ "â": true, ++ "â": true, ++ "´": true, ++ "´": true, ++ "а": true, ++ "æ": true, ++ "æ": true, ++ "⁡": true, ++ "𝔞": true, ++ "à": true, ++ "à": true, ++ "ℵ": true, ++ "ℵ": true, ++ "α": true, ++ "ā": true, ++ "⨿": true, ++ "&": true, ++ "&": true, ++ "∧": true, ++ "⩕": true, ++ "⩜": true, ++ "⩘": true, ++ "⩚": true, ++ "∠": true, ++ "⦤": true, ++ "∠": true, ++ "∡": true, ++ "⦨": true, ++ "⦩": true, ++ "⦪": true, ++ "⦫": true, ++ "⦬": true, ++ "⦭": true, ++ "⦮": true, ++ "⦯": true, ++ "∟": true, ++ "⊾": true, ++ "⦝": true, ++ "∢": true, ++ "Å": true, ++ "⍼": true, ++ "ą": true, ++ "𝕒": true, ++ "≈": true, ++ "⩰": true, ++ "⩯": true, ++ "≊": true, ++ "≋": true, ++ "'": true, ++ "≈": true, ++ "≊": true, ++ "å": true, ++ "å": true, ++ "𝒶": true, ++ "*": true, ++ "≈": true, ++ "≍": true, ++ "ã": true, ++ "ã": true, ++ "ä": true, ++ "ä": true, ++ "∳": true, ++ "⨑": true, ++ "⫭": true, ++ "≌": true, ++ "϶": true, ++ "‵": true, ++ "∽": true, ++ "⋍": true, ++ "⊽": true, ++ "⌅": true, ++ "⌅": true, ++ "⎵": true, ++ "⎶": true, ++ "≌": true, ++ "б": true, ++ "„": true, ++ "∵": true, ++ "∵": true, ++ "⦰": true, ++ "϶": true, ++ "ℬ": true, ++ "β": true, ++ "ℶ": true, ++ "≬": true, ++ "𝔟": true, ++ "⋂": true, ++ "◯": true, ++ "⋃": true, ++ "⨀": true, ++ "⨁": 
true, ++ "⨂": true, ++ "⨆": true, ++ "★": true, ++ "▽": true, ++ "△": true, ++ "⨄": true, ++ "⋁": true, ++ "⋀": true, ++ "⤍": true, ++ "⧫": true, ++ "▪": true, ++ "▴": true, ++ "▾": true, ++ "◂": true, ++ "▸": true, ++ "␣": true, ++ "▒": true, ++ "░": true, ++ "▓": true, ++ "█": true, ++ "=⃥": true, ++ "≡⃥": true, ++ "⌐": true, ++ "𝕓": true, ++ "⊥": true, ++ "⊥": true, ++ "⋈": true, ++ "╗": true, ++ "╔": true, ++ "╖": true, ++ "╓": true, ++ "═": true, ++ "╦": true, ++ "╩": true, ++ "╤": true, ++ "╧": true, ++ "╝": true, ++ "╚": true, ++ "╜": true, ++ "╙": true, ++ "║": true, ++ "╬": true, ++ "╣": true, ++ "╠": true, ++ "╫": true, ++ "╢": true, ++ "╟": true, ++ "⧉": true, ++ "╕": true, ++ "╒": true, ++ "┐": true, ++ "┌": true, ++ "─": true, ++ "╥": true, ++ "╨": true, ++ "┬": true, ++ "┴": true, ++ "⊟": true, ++ "⊞": true, ++ "⊠": true, ++ "╛": true, ++ "╘": true, ++ "┘": true, ++ "└": true, ++ "│": true, ++ "╪": true, ++ "╡": true, ++ "╞": true, ++ "┼": true, ++ "┤": true, ++ "├": true, ++ "‵": true, ++ "˘": true, ++ "¦": true, ++ "¦": true, ++ "𝒷": true, ++ "⁏": true, ++ "∽": true, ++ "⋍": true, ++ "\": true, ++ "⧅": true, ++ "⟈": true, ++ "•": true, ++ "•": true, ++ "≎": true, ++ "⪮": true, ++ "≏": true, ++ "≏": true, ++ "ć": true, ++ "∩": true, ++ "⩄": true, ++ "⩉": true, ++ "⩋": true, ++ "⩇": true, ++ "⩀": true, ++ "∩︀": true, ++ "⁁": true, ++ "ˇ": true, ++ "⩍": true, ++ "č": true, ++ "ç": true, ++ "ç": true, ++ "ĉ": true, ++ "⩌": true, ++ "⩐": true, ++ "ċ": true, ++ "¸": true, ++ "¸": true, ++ "⦲": true, ++ "¢": true, ++ "¢": true, ++ "·": true, ++ "𝔠": true, ++ "ч": true, ++ "✓": true, ++ "✓": true, ++ "χ": true, ++ "○": true, ++ "⧃": true, ++ "ˆ": true, ++ "≗": true, ++ "↺": true, ++ "↻": true, ++ "®": true, ++ "Ⓢ": true, ++ "⊛": true, ++ "⊚": true, ++ "⊝": true, ++ "≗": true, ++ "⨐": true, ++ "⫯": true, ++ "⧂": true, ++ "♣": true, ++ "♣": true, ++ ":": true, ++ "≔": true, ++ "≔": true, ++ ",": true, ++ "@": true, ++ "∁": true, ++ "∘": true, ++ "∁": true, ++ "ℂ": true, ++ "≅": true, ++ "⩭": true, ++ "∮": true, ++ "𝕔": true, ++ "∐": true, ++ "©": true, ++ "©": true, ++ "℗": true, ++ "↵": true, ++ "✗": true, ++ "𝒸": true, ++ "⫏": true, ++ "⫑": true, ++ "⫐": true, ++ "⫒": true, ++ "⋯": true, ++ "⤸": true, ++ "⤵": true, ++ "⋞": true, ++ "⋟": true, ++ "↶": true, ++ "⤽": true, ++ "∪": true, ++ "⩈": true, ++ "⩆": true, ++ "⩊": true, ++ "⊍": true, ++ "⩅": true, ++ "∪︀": true, ++ "↷": true, ++ "⤼": true, ++ "⋞": true, ++ "⋟": true, ++ "⋎": true, ++ "⋏": true, ++ "¤": true, ++ "¤": true, ++ "↶": true, ++ "↷": true, ++ "⋎": true, ++ "⋏": true, ++ "∲": true, ++ "∱": true, ++ "⌭": true, ++ "⇓": true, ++ "⥥": true, ++ "†": true, ++ "ℸ": true, ++ "↓": true, ++ "‐": true, ++ "⊣": true, ++ "⤏": true, ++ "˝": true, ++ "ď": true, ++ "д": true, ++ "ⅆ": true, ++ "‡": true, ++ "⇊": true, ++ "⩷": true, ++ "°": true, ++ "°": true, ++ "δ": true, ++ "⦱": true, ++ "⥿": true, ++ "𝔡": true, ++ "⇃": true, ++ "⇂": true, ++ "⋄": true, ++ "⋄": true, ++ "♦": true, ++ "♦": true, ++ "¨": true, ++ "ϝ": true, ++ "⋲": true, ++ "÷": true, ++ "÷": true, ++ "÷": true, ++ "⋇": true, ++ "⋇": true, ++ "ђ": true, ++ "⌞": true, ++ "⌍": true, ++ "$": true, ++ "𝕕": true, ++ "˙": true, ++ "≐": true, ++ "≑": true, ++ "∸": true, ++ "∔": true, ++ "⊡": true, ++ "⌆": true, ++ "↓": true, ++ "⇊": true, ++ "⇃": true, ++ "⇂": true, ++ "⤐": true, ++ "⌟": true, ++ "⌌": true, ++ "𝒹": true, ++ "ѕ": true, ++ "⧶": true, ++ "đ": true, ++ "⋱": true, ++ "▿": true, ++ "▾": true, ++ "⇵": true, ++ "⥯": true, ++ "⦦": true, ++ "џ": true, ++ "⟿": true, ++ 
"⩷": true, ++ "≑": true, ++ "é": true, ++ "é": true, ++ "⩮": true, ++ "ě": true, ++ "≖": true, ++ "ê": true, ++ "ê": true, ++ "≕": true, ++ "э": true, ++ "ė": true, ++ "ⅇ": true, ++ "≒": true, ++ "𝔢": true, ++ "⪚": true, ++ "è": true, ++ "è": true, ++ "⪖": true, ++ "⪘": true, ++ "⪙": true, ++ "⏧": true, ++ "ℓ": true, ++ "⪕": true, ++ "⪗": true, ++ "ē": true, ++ "∅": true, ++ "∅": true, ++ "∅": true, ++ " ": true, ++ " ": true, ++ " ": true, ++ "ŋ": true, ++ " ": true, ++ "ę": true, ++ "𝕖": true, ++ "⋕": true, ++ "⧣": true, ++ "⩱": true, ++ "ε": true, ++ "ε": true, ++ "ϵ": true, ++ "≖": true, ++ "≕": true, ++ "≂": true, ++ "⪖": true, ++ "⪕": true, ++ "=": true, ++ "≟": true, ++ "≡": true, ++ "⩸": true, ++ "⧥": true, ++ "≓": true, ++ "⥱": true, ++ "ℯ": true, ++ "≐": true, ++ "≂": true, ++ "η": true, ++ "ð": true, ++ "ð": true, ++ "ë": true, ++ "ë": true, ++ "€": true, ++ "!": true, ++ "∃": true, ++ "ℰ": true, ++ "ⅇ": true, ++ "≒": true, ++ "ф": true, ++ "♀": true, ++ "ffi": true, ++ "ff": true, ++ "ffl": true, ++ "𝔣": true, ++ "fi": true, ++ "fj": true, ++ "♭": true, ++ "fl": true, ++ "▱": true, ++ "ƒ": true, ++ "𝕗": true, ++ "∀": true, ++ "⋔": true, ++ "⫙": true, ++ "⨍": true, ++ "½": true, ++ "½": true, ++ "⅓": true, ++ "¼": true, ++ "¼": true, ++ "⅕": true, ++ "⅙": true, ++ "⅛": true, ++ "⅔": true, ++ "⅖": true, ++ "¾": true, ++ "¾": true, ++ "⅗": true, ++ "⅜": true, ++ "⅘": true, ++ "⅚": true, ++ "⅝": true, ++ "⅞": true, ++ "⁄": true, ++ "⌢": true, ++ "𝒻": true, ++ "≧": true, ++ "⪌": true, ++ "ǵ": true, ++ "γ": true, ++ "ϝ": true, ++ "⪆": true, ++ "ğ": true, ++ "ĝ": true, ++ "г": true, ++ "ġ": true, ++ "≥": true, ++ "⋛": true, ++ "≥": true, ++ "≧": true, ++ "⩾": true, ++ "⩾": true, ++ "⪩": true, ++ "⪀": true, ++ "⪂": true, ++ "⪄": true, ++ "⋛︀": true, ++ "⪔": true, ++ "𝔤": true, ++ "≫": true, ++ "⋙": true, ++ "ℷ": true, ++ "ѓ": true, ++ "≷": true, ++ "⪒": true, ++ "⪥": true, ++ "⪤": true, ++ "≩": true, ++ "⪊": true, ++ "⪊": true, ++ "⪈": true, ++ "⪈": true, ++ "≩": true, ++ "⋧": true, ++ "𝕘": true, ++ "`": true, ++ "ℊ": true, ++ "≳": true, ++ "⪎": true, ++ "⪐": true, ++ ">": true, ++ ">": true, ++ "⪧": true, ++ "⩺": true, ++ "⋗": true, ++ "⦕": true, ++ "⩼": true, ++ "⪆": true, ++ "⥸": true, ++ "⋗": true, ++ "⋛": true, ++ "⪌": true, ++ "≷": true, ++ "≳": true, ++ "≩︀": true, ++ "≩︀": true, ++ "⇔": true, ++ " ": true, ++ "½": true, ++ "ℋ": true, ++ "ъ": true, ++ "↔": true, ++ "⥈": true, ++ "↭": true, ++ "ℏ": true, ++ "ĥ": true, ++ "♥": true, ++ "♥": true, ++ "…": true, ++ "⊹": true, ++ "𝔥": true, ++ "⤥": true, ++ "⤦": true, ++ "⇿": true, ++ "∻": true, ++ "↩": true, ++ "↪": true, ++ "𝕙": true, ++ "―": true, ++ "𝒽": true, ++ "ℏ": true, ++ "ħ": true, ++ "⁃": true, ++ "‐": true, ++ "í": true, ++ "í": true, ++ "⁣": true, ++ "î": true, ++ "î": true, ++ "и": true, ++ "е": true, ++ "¡": true, ++ "¡": true, ++ "⇔": true, ++ "𝔦": true, ++ "ì": true, ++ "ì": true, ++ "ⅈ": true, ++ "⨌": true, ++ "∭": true, ++ "⧜": true, ++ "℩": true, ++ "ij": true, ++ "ī": true, ++ "ℑ": true, ++ "ℐ": true, ++ "ℑ": true, ++ "ı": true, ++ "⊷": true, ++ "Ƶ": true, ++ "∈": true, ++ "℅": true, ++ "∞": true, ++ "⧝": true, ++ "ı": true, ++ "∫": true, ++ "⊺": true, ++ "ℤ": true, ++ "⊺": true, ++ "⨗": true, ++ "⨼": true, ++ "ё": true, ++ "į": true, ++ "𝕚": true, ++ "ι": true, ++ "⨼": true, ++ "¿": true, ++ "¿": true, ++ "𝒾": true, ++ "∈": true, ++ "⋹": true, ++ "⋵": true, ++ "⋴": true, ++ "⋳": true, ++ "∈": true, ++ "⁢": true, ++ "ĩ": true, ++ "і": true, ++ "ï": true, ++ "ï": true, ++ "ĵ": true, ++ "й": true, ++ "𝔧": true, ++ 
"ȷ": true, ++ "𝕛": true, ++ "𝒿": true, ++ "ј": true, ++ "є": true, ++ "κ": true, ++ "ϰ": true, ++ "ķ": true, ++ "к": true, ++ "𝔨": true, ++ "ĸ": true, ++ "х": true, ++ "ќ": true, ++ "𝕜": true, ++ "𝓀": true, ++ "⇚": true, ++ "⇐": true, ++ "⤛": true, ++ "⤎": true, ++ "≦": true, ++ "⪋": true, ++ "⥢": true, ++ "ĺ": true, ++ "⦴": true, ++ "ℒ": true, ++ "λ": true, ++ "⟨": true, ++ "⦑": true, ++ "⟨": true, ++ "⪅": true, ++ "«": true, ++ "«": true, ++ "←": true, ++ "⇤": true, ++ "⤟": true, ++ "⤝": true, ++ "↩": true, ++ "↫": true, ++ "⤹": true, ++ "⥳": true, ++ "↢": true, ++ "⪫": true, ++ "⤙": true, ++ "⪭": true, ++ "⪭︀": true, ++ "⤌": true, ++ "❲": true, ++ "{": true, ++ "[": true, ++ "⦋": true, ++ "⦏": true, ++ "⦍": true, ++ "ľ": true, ++ "ļ": true, ++ "⌈": true, ++ "{": true, ++ "л": true, ++ "⤶": true, ++ "“": true, ++ "„": true, ++ "⥧": true, ++ "⥋": true, ++ "↲": true, ++ "≤": true, ++ "←": true, ++ "↢": true, ++ "↽": true, ++ "↼": true, ++ "⇇": true, ++ "↔": true, ++ "⇆": true, ++ "⇋": true, ++ "↭": true, ++ "⋋": true, ++ "⋚": true, ++ "≤": true, ++ "≦": true, ++ "⩽": true, ++ "⩽": true, ++ "⪨": true, ++ "⩿": true, ++ "⪁": true, ++ "⪃": true, ++ "⋚︀": true, ++ "⪓": true, ++ "⪅": true, ++ "⋖": true, ++ "⋚": true, ++ "⪋": true, ++ "≶": true, ++ "≲": true, ++ "⥼": true, ++ "⌊": true, ++ "𝔩": true, ++ "≶": true, ++ "⪑": true, ++ "↽": true, ++ "↼": true, ++ "⥪": true, ++ "▄": true, ++ "љ": true, ++ "≪": true, ++ "⇇": true, ++ "⌞": true, ++ "⥫": true, ++ "◺": true, ++ "ŀ": true, ++ "⎰": true, ++ "⎰": true, ++ "≨": true, ++ "⪉": true, ++ "⪉": true, ++ "⪇": true, ++ "⪇": true, ++ "≨": true, ++ "⋦": true, ++ "⟬": true, ++ "⇽": true, ++ "⟦": true, ++ "⟵": true, ++ "⟷": true, ++ "⟼": true, ++ "⟶": true, ++ "↫": true, ++ "↬": true, ++ "⦅": true, ++ "𝕝": true, ++ "⨭": true, ++ "⨴": true, ++ "∗": true, ++ "_": true, ++ "◊": true, ++ "◊": true, ++ "⧫": true, ++ "(": true, ++ "⦓": true, ++ "⇆": true, ++ "⌟": true, ++ "⇋": true, ++ "⥭": true, ++ "‎": true, ++ "⊿": true, ++ "‹": true, ++ "𝓁": true, ++ "↰": true, ++ "≲": true, ++ "⪍": true, ++ "⪏": true, ++ "[": true, ++ "‘": true, ++ "‚": true, ++ "ł": true, ++ "<": true, ++ "<": true, ++ "⪦": true, ++ "⩹": true, ++ "⋖": true, ++ "⋋": true, ++ "⋉": true, ++ "⥶": true, ++ "⩻": true, ++ "⦖": true, ++ "◃": true, ++ "⊴": true, ++ "◂": true, ++ "⥊": true, ++ "⥦": true, ++ "≨︀": true, ++ "≨︀": true, ++ "∺": true, ++ "¯": true, ++ "¯": true, ++ "♂": true, ++ "✠": true, ++ "✠": true, ++ "↦": true, ++ "↦": true, ++ "↧": true, ++ "↤": true, ++ "↥": true, ++ "▮": true, ++ "⨩": true, ++ "м": true, ++ "—": true, ++ "∡": true, ++ "𝔪": true, ++ "℧": true, ++ "µ": true, ++ "µ": true, ++ "∣": true, ++ "*": true, ++ "⫰": true, ++ "·": true, ++ "·": true, ++ "−": true, ++ "⊟": true, ++ "∸": true, ++ "⨪": true, ++ "⫛": true, ++ "…": true, ++ "∓": true, ++ "⊧": true, ++ "𝕞": true, ++ "∓": true, ++ "𝓂": true, ++ "∾": true, ++ "μ": true, ++ "⊸": true, ++ "⊸": true, ++ "⋙̸": true, ++ "≫⃒": true, ++ "≫̸": true, ++ "⇍": true, ++ "⇎": true, ++ "⋘̸": true, ++ "≪⃒": true, ++ "≪̸": true, ++ "⇏": true, ++ "⊯": true, ++ "⊮": true, ++ "∇": true, ++ "ń": true, ++ "∠⃒": true, ++ "≉": true, ++ "⩰̸": true, ++ "≋̸": true, ++ "ʼn": true, ++ "≉": true, ++ "♮": true, ++ "♮": true, ++ "ℕ": true, ++ " ": true, ++ " ": true, ++ "≎̸": true, ++ "≏̸": true, ++ "⩃": true, ++ "ň": true, ++ "ņ": true, ++ "≇": true, ++ "⩭̸": true, ++ "⩂": true, ++ "н": true, ++ "–": true, ++ "≠": true, ++ "⇗": true, ++ "⤤": true, ++ "↗": true, ++ "↗": true, ++ "≐̸": true, ++ "≢": true, ++ "⤨": true, ++ "≂̸": true, ++ "∄": 
true, ++ "∄": true, ++ "𝔫": true, ++ "≧̸": true, ++ "≱": true, ++ "≱": true, ++ "≧̸": true, ++ "⩾̸": true, ++ "⩾̸": true, ++ "≵": true, ++ "≯": true, ++ "≯": true, ++ "⇎": true, ++ "↮": true, ++ "⫲": true, ++ "∋": true, ++ "⋼": true, ++ "⋺": true, ++ "∋": true, ++ "њ": true, ++ "⇍": true, ++ "≦̸": true, ++ "↚": true, ++ "‥": true, ++ "≰": true, ++ "↚": true, ++ "↮": true, ++ "≰": true, ++ "≦̸": true, ++ "⩽̸": true, ++ "⩽̸": true, ++ "≮": true, ++ "≴": true, ++ "≮": true, ++ "⋪": true, ++ "⋬": true, ++ "∤": true, ++ "𝕟": true, ++ "¬": true, ++ "¬": true, ++ "∉": true, ++ "⋹̸": true, ++ "⋵̸": true, ++ "∉": true, ++ "⋷": true, ++ "⋶": true, ++ "∌": true, ++ "∌": true, ++ "⋾": true, ++ "⋽": true, ++ "∦": true, ++ "∦": true, ++ "⫽⃥": true, ++ "∂̸": true, ++ "⨔": true, ++ "⊀": true, ++ "⋠": true, ++ "⪯̸": true, ++ "⊀": true, ++ "⪯̸": true, ++ "⇏": true, ++ "↛": true, ++ "⤳̸": true, ++ "↝̸": true, ++ "↛": true, ++ "⋫": true, ++ "⋭": true, ++ "⊁": true, ++ "⋡": true, ++ "⪰̸": true, ++ "𝓃": true, ++ "∤": true, ++ "∦": true, ++ "≁": true, ++ "≄": true, ++ "≄": true, ++ "∤": true, ++ "∦": true, ++ "⋢": true, ++ "⋣": true, ++ "⊄": true, ++ "⫅̸": true, ++ "⊈": true, ++ "⊂⃒": true, ++ "⊈": true, ++ "⫅̸": true, ++ "⊁": true, ++ "⪰̸": true, ++ "⊅": true, ++ "⫆̸": true, ++ "⊉": true, ++ "⊃⃒": true, ++ "⊉": true, ++ "⫆̸": true, ++ "≹": true, ++ "ñ": true, ++ "ñ": true, ++ "≸": true, ++ "⋪": true, ++ "⋬": true, ++ "⋫": true, ++ "⋭": true, ++ "ν": true, ++ "#": true, ++ "№": true, ++ " ": true, ++ "⊭": true, ++ "⤄": true, ++ "≍⃒": true, ++ "⊬": true, ++ "≥⃒": true, ++ ">⃒": true, ++ "⧞": true, ++ "⤂": true, ++ "≤⃒": true, ++ "<⃒": true, ++ "⊴⃒": true, ++ "⤃": true, ++ "⊵⃒": true, ++ "∼⃒": true, ++ "⇖": true, ++ "⤣": true, ++ "↖": true, ++ "↖": true, ++ "⤧": true, ++ "Ⓢ": true, ++ "ó": true, ++ "ó": true, ++ "⊛": true, ++ "⊚": true, ++ "ô": true, ++ "ô": true, ++ "о": true, ++ "⊝": true, ++ "ő": true, ++ "⨸": true, ++ "⊙": true, ++ "⦼": true, ++ "œ": true, ++ "⦿": true, ++ "𝔬": true, ++ "˛": true, ++ "ò": true, ++ "ò": true, ++ "⧁": true, ++ "⦵": true, ++ "Ω": true, ++ "∮": true, ++ "↺": true, ++ "⦾": true, ++ "⦻": true, ++ "‾": true, ++ "⧀": true, ++ "ō": true, ++ "ω": true, ++ "ο": true, ++ "⦶": true, ++ "⊖": true, ++ "𝕠": true, ++ "⦷": true, ++ "⦹": true, ++ "⊕": true, ++ "∨": true, ++ "↻": true, ++ "⩝": true, ++ "ℴ": true, ++ "ℴ": true, ++ "ª": true, ++ "ª": true, ++ "º": true, ++ "º": true, ++ "⊶": true, ++ "⩖": true, ++ "⩗": true, ++ "⩛": true, ++ "ℴ": true, ++ "ø": true, ++ "ø": true, ++ "⊘": true, ++ "õ": true, ++ "õ": true, ++ "⊗": true, ++ "⨶": true, ++ "ö": true, ++ "ö": true, ++ "⌽": true, ++ "∥": true, ++ "¶": true, ++ "¶": true, ++ "∥": true, ++ "⫳": true, ++ "⫽": true, ++ "∂": true, ++ "п": true, ++ "%": true, ++ ".": true, ++ "‰": true, ++ "⊥": true, ++ "‱": true, ++ "𝔭": true, ++ "φ": true, ++ "ϕ": true, ++ "ℳ": true, ++ "☎": true, ++ "π": true, ++ "⋔": true, ++ "ϖ": true, ++ "ℏ": true, ++ "ℎ": true, ++ "ℏ": true, ++ "+": true, ++ "⨣": true, ++ "⊞": true, ++ "⨢": true, ++ "∔": true, ++ "⨥": true, ++ "⩲": true, ++ "±": true, ++ "±": true, ++ "⨦": true, ++ "⨧": true, ++ "±": true, ++ "⨕": true, ++ "𝕡": true, ++ "£": true, ++ "£": true, ++ "≺": true, ++ "⪳": true, ++ "⪷": true, ++ "≼": true, ++ "⪯": true, ++ "≺": true, ++ "⪷": true, ++ "≼": true, ++ "⪯": true, ++ "⪹": true, ++ "⪵": true, ++ "⋨": true, ++ "≾": true, ++ "′": true, ++ "ℙ": true, ++ "⪵": true, ++ "⪹": true, ++ "⋨": true, ++ "∏": true, ++ "⌮": true, ++ "⌒": true, ++ "⌓": true, ++ "∝": true, ++ "∝": true, ++ "≾": true, ++ "⊰": true, ++ 
"𝓅": true, ++ "ψ": true, ++ " ": true, ++ "𝔮": true, ++ "⨌": true, ++ "𝕢": true, ++ "⁗": true, ++ "𝓆": true, ++ "ℍ": true, ++ "⨖": true, ++ "?": true, ++ "≟": true, ++ """: true, ++ """: true, ++ "⇛": true, ++ "⇒": true, ++ "⤜": true, ++ "⤏": true, ++ "⥤": true, ++ "∽̱": true, ++ "ŕ": true, ++ "√": true, ++ "⦳": true, ++ "⟩": true, ++ "⦒": true, ++ "⦥": true, ++ "⟩": true, ++ "»": true, ++ "»": true, ++ "→": true, ++ "⥵": true, ++ "⇥": true, ++ "⤠": true, ++ "⤳": true, ++ "⤞": true, ++ "↪": true, ++ "↬": true, ++ "⥅": true, ++ "⥴": true, ++ "↣": true, ++ "↝": true, ++ "⤚": true, ++ "∶": true, ++ "ℚ": true, ++ "⤍": true, ++ "❳": true, ++ "}": true, ++ "]": true, ++ "⦌": true, ++ "⦎": true, ++ "⦐": true, ++ "ř": true, ++ "ŗ": true, ++ "⌉": true, ++ "}": true, ++ "р": true, ++ "⤷": true, ++ "⥩": true, ++ "”": true, ++ "”": true, ++ "↳": true, ++ "ℜ": true, ++ "ℛ": true, ++ "ℜ": true, ++ "ℝ": true, ++ "▭": true, ++ "®": true, ++ "®": true, ++ "⥽": true, ++ "⌋": true, ++ "𝔯": true, ++ "⇁": true, ++ "⇀": true, ++ "⥬": true, ++ "ρ": true, ++ "ϱ": true, ++ "→": true, ++ "↣": true, ++ "⇁": true, ++ "⇀": true, ++ "⇄": true, ++ "⇌": true, ++ "⇉": true, ++ "↝": true, ++ "⋌": true, ++ "˚": true, ++ "≓": true, ++ "⇄": true, ++ "⇌": true, ++ "‏": true, ++ "⎱": true, ++ "⎱": true, ++ "⫮": true, ++ "⟭": true, ++ "⇾": true, ++ "⟧": true, ++ "⦆": true, ++ "𝕣": true, ++ "⨮": true, ++ "⨵": true, ++ ")": true, ++ "⦔": true, ++ "⨒": true, ++ "⇉": true, ++ "›": true, ++ "𝓇": true, ++ "↱": true, ++ "]": true, ++ "’": true, ++ "’": true, ++ "⋌": true, ++ "⋊": true, ++ "▹": true, ++ "⊵": true, ++ "▸": true, ++ "⧎": true, ++ "⥨": true, ++ "℞": true, ++ "ś": true, ++ "‚": true, ++ "≻": true, ++ "⪴": true, ++ "⪸": true, ++ "š": true, ++ "≽": true, ++ "⪰": true, ++ "ş": true, ++ "ŝ": true, ++ "⪶": true, ++ "⪺": true, ++ "⋩": true, ++ "⨓": true, ++ "≿": true, ++ "с": true, ++ "⋅": true, ++ "⊡": true, ++ "⩦": true, ++ "⇘": true, ++ "⤥": true, ++ "↘": true, ++ "↘": true, ++ "§": true, ++ "§": true, ++ ";": true, ++ "⤩": true, ++ "∖": true, ++ "∖": true, ++ "✶": true, ++ "𝔰": true, ++ "⌢": true, ++ "♯": true, ++ "щ": true, ++ "ш": true, ++ "∣": true, ++ "∥": true, ++ "­": true, ++ "­": true, ++ "σ": true, ++ "ς": true, ++ "ς": true, ++ "∼": true, ++ "⩪": true, ++ "≃": true, ++ "≃": true, ++ "⪞": true, ++ "⪠": true, ++ "⪝": true, ++ "⪟": true, ++ "≆": true, ++ "⨤": true, ++ "⥲": true, ++ "←": true, ++ "∖": true, ++ "⨳": true, ++ "⧤": true, ++ "∣": true, ++ "⌣": true, ++ "⪪": true, ++ "⪬": true, ++ "⪬︀": true, ++ "ь": true, ++ "/": true, ++ "⧄": true, ++ "⌿": true, ++ "𝕤": true, ++ "♠": true, ++ "♠": true, ++ "∥": true, ++ "⊓": true, ++ "⊓︀": true, ++ "⊔": true, ++ "⊔︀": true, ++ "⊏": true, ++ "⊑": true, ++ "⊏": true, ++ "⊑": true, ++ "⊐": true, ++ "⊒": true, ++ "⊐": true, ++ "⊒": true, ++ "□": true, ++ "□": true, ++ "▪": true, ++ "▪": true, ++ "→": true, ++ "𝓈": true, ++ "∖": true, ++ "⌣": true, ++ "⋆": true, ++ "☆": true, ++ "★": true, ++ "ϵ": true, ++ "ϕ": true, ++ "¯": true, ++ "⊂": true, ++ "⫅": true, ++ "⪽": true, ++ "⊆": true, ++ "⫃": true, ++ "⫁": true, ++ "⫋": true, ++ "⊊": true, ++ "⪿": true, ++ "⥹": true, ++ "⊂": true, ++ "⊆": true, ++ "⫅": true, ++ "⊊": true, ++ "⫋": true, ++ "⫇": true, ++ "⫕": true, ++ "⫓": true, ++ "≻": true, ++ "⪸": true, ++ "≽": true, ++ "⪰": true, ++ "⪺": true, ++ "⪶": true, ++ "⋩": true, ++ "≿": true, ++ "∑": true, ++ "♪": true, ++ "¹": true, ++ "¹": true, ++ "²": true, ++ "²": true, ++ "³": true, ++ "³": true, ++ "⊃": true, ++ "⫆": true, ++ "⪾": true, ++ "⫘": true, ++ "⊇": true, ++ "⫄": 
true, ++ "⟉": true, ++ "⫗": true, ++ "⥻": true, ++ "⫂": true, ++ "⫌": true, ++ "⊋": true, ++ "⫀": true, ++ "⊃": true, ++ "⊇": true, ++ "⫆": true, ++ "⊋": true, ++ "⫌": true, ++ "⫈": true, ++ "⫔": true, ++ "⫖": true, ++ "⇙": true, ++ "⤦": true, ++ "↙": true, ++ "↙": true, ++ "⤪": true, ++ "ß": true, ++ "ß": true, ++ "⌖": true, ++ "τ": true, ++ "⎴": true, ++ "ť": true, ++ "ţ": true, ++ "т": true, ++ "⃛": true, ++ "⌕": true, ++ "𝔱": true, ++ "∴": true, ++ "∴": true, ++ "θ": true, ++ "ϑ": true, ++ "ϑ": true, ++ "≈": true, ++ "∼": true, ++ " ": true, ++ "≈": true, ++ "∼": true, ++ "þ": true, ++ "þ": true, ++ "˜": true, ++ "×": true, ++ "×": true, ++ "⊠": true, ++ "⨱": true, ++ "⨰": true, ++ "∭": true, ++ "⤨": true, ++ "⊤": true, ++ "⌶": true, ++ "⫱": true, ++ "𝕥": true, ++ "⫚": true, ++ "⤩": true, ++ "‴": true, ++ "™": true, ++ "▵": true, ++ "▿": true, ++ "◃": true, ++ "⊴": true, ++ "≜": true, ++ "▹": true, ++ "⊵": true, ++ "◬": true, ++ "≜": true, ++ "⨺": true, ++ "⨹": true, ++ "⧍": true, ++ "⨻": true, ++ "⏢": true, ++ "𝓉": true, ++ "ц": true, ++ "ћ": true, ++ "ŧ": true, ++ "≬": true, ++ "↞": true, ++ "↠": true, ++ "⇑": true, ++ "⥣": true, ++ "ú": true, ++ "ú": true, ++ "↑": true, ++ "ў": true, ++ "ŭ": true, ++ "û": true, ++ "û": true, ++ "у": true, ++ "⇅": true, ++ "ű": true, ++ "⥮": true, ++ "⥾": true, ++ "𝔲": true, ++ "ù": true, ++ "ù": true, ++ "↿": true, ++ "↾": true, ++ "▀": true, ++ "⌜": true, ++ "⌜": true, ++ "⌏": true, ++ "◸": true, ++ "ū": true, ++ "¨": true, ++ "¨": true, ++ "ų": true, ++ "𝕦": true, ++ "↑": true, ++ "↕": true, ++ "↿": true, ++ "↾": true, ++ "⊎": true, ++ "υ": true, ++ "ϒ": true, ++ "υ": true, ++ "⇈": true, ++ "⌝": true, ++ "⌝": true, ++ "⌎": true, ++ "ů": true, ++ "◹": true, ++ "𝓊": true, ++ "⋰": true, ++ "ũ": true, ++ "▵": true, ++ "▴": true, ++ "⇈": true, ++ "ü": true, ++ "ü": true, ++ "⦧": true, ++ "⇕": true, ++ "⫨": true, ++ "⫩": true, ++ "⊨": true, ++ "⦜": true, ++ "ϵ": true, ++ "ϰ": true, ++ "∅": true, ++ "ϕ": true, ++ "ϖ": true, ++ "∝": true, ++ "↕": true, ++ "ϱ": true, ++ "ς": true, ++ "⊊︀": true, ++ "⫋︀": true, ++ "⊋︀": true, ++ "⫌︀": true, ++ "ϑ": true, ++ "⊲": true, ++ "⊳": true, ++ "в": true, ++ "⊢": true, ++ "∨": true, ++ "⊻": true, ++ "≚": true, ++ "⋮": true, ++ "|": true, ++ "|": true, ++ "𝔳": true, ++ "⊲": true, ++ "⊂⃒": true, ++ "⊃⃒": true, ++ "𝕧": true, ++ "∝": true, ++ "⊳": true, ++ "𝓋": true, ++ "⫋︀": true, ++ "⊊︀": true, ++ "⫌︀": true, ++ "⊋︀": true, ++ "⦚": true, ++ "ŵ": true, ++ "⩟": true, ++ "∧": true, ++ "≙": true, ++ "℘": true, ++ "𝔴": true, ++ "𝕨": true, ++ "℘": true, ++ "≀": true, ++ "≀": true, ++ "𝓌": true, ++ "⋂": true, ++ "◯": true, ++ "⋃": true, ++ "▽": true, ++ "𝔵": true, ++ "⟺": true, ++ "⟷": true, ++ "ξ": true, ++ "⟸": true, ++ "⟵": true, ++ "⟼": true, ++ "⋻": true, ++ "⨀": true, ++ "𝕩": true, ++ "⨁": true, ++ "⨂": true, ++ "⟹": true, ++ "⟶": true, ++ "𝓍": true, ++ "⨆": true, ++ "⨄": true, ++ "△": true, ++ "⋁": true, ++ "⋀": true, ++ "ý": true, ++ "ý": true, ++ "я": true, ++ "ŷ": true, ++ "ы": true, ++ "¥": true, ++ "¥": true, ++ "𝔶": true, ++ "ї": true, ++ "𝕪": true, ++ "𝓎": true, ++ "ю": true, ++ "ÿ": true, ++ "ÿ": true, ++ "ź": true, ++ "ž": true, ++ "з": true, ++ "ż": true, ++ "ℨ": true, ++ "ζ": true, ++ "𝔷": true, ++ "ж": true, ++ "⇝": true, ++ "𝕫": true, ++ "𝓏": true, ++ "‍": true, ++ "‌": true, ++} +diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go +new file mode 100644 +index 000000000000..6ab60102c9bf +--- /dev/null ++++ 
b/vendor/github.com/russross/blackfriday/v2/esc.go
+@@ -0,0 +1,70 @@
++package blackfriday
++
++import (
++ "html"
++ "io"
++)
++
++var htmlEscaper = [256][]byte{
++ '&': []byte("&amp;"),
++ '<': []byte("&lt;"),
++ '>': []byte("&gt;"),
++ '"': []byte("&quot;"),
++}
++
++func escapeHTML(w io.Writer, s []byte) {
++ escapeEntities(w, s, false)
++}
++
++func escapeAllHTML(w io.Writer, s []byte) {
++ escapeEntities(w, s, true)
++}
++
++func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) {
++ var start, end int
++ for end < len(s) {
++ escSeq := htmlEscaper[s[end]]
++ if escSeq != nil {
++ isEntity, entityEnd := nodeIsEntity(s, end)
++ if isEntity && !escapeValidEntities {
++ w.Write(s[start : entityEnd+1])
++ start = entityEnd + 1
++ } else {
++ w.Write(s[start:end])
++ w.Write(escSeq)
++ start = end + 1
++ }
++ }
++ end++
++ }
++ if start < len(s) && end <= len(s) {
++ w.Write(s[start:end])
++ }
++}
++
++func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) {
++ isEntity = false
++ endEntityPos = end + 1
++
++ if s[end] == '&' {
++ for endEntityPos < len(s) {
++ if s[endEntityPos] == ';' {
++ if entities[string(s[end:endEntityPos+1])] {
++ isEntity = true
++ break
++ }
++ }
++ if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' {
++ break
++ }
++ endEntityPos++
++ }
++ }
++
++ return isEntity, endEntityPos
++}
++
++func escLink(w io.Writer, text []byte) {
++ unesc := html.UnescapeString(string(text))
++ escapeHTML(w, []byte(unesc))
++}
+diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go
+new file mode 100644
+index 000000000000..cb4f26e30fd5
+--- /dev/null
++++ b/vendor/github.com/russross/blackfriday/v2/html.go
+@@ -0,0 +1,952 @@
++//
++// Blackfriday Markdown Processor
++// Available at http://github.com/russross/blackfriday
++//
++// Copyright © 2011 Russ Ross <russ@russross.com>.
++// Distributed under the Simplified BSD License.
++// See README.md for details.
++//
++
++//
++//
++// HTML rendering backend
++//
++//
++
++package blackfriday
++
++import (
++ "bytes"
++ "fmt"
++ "io"
++ "regexp"
++ "strings"
++)
++
++// HTMLFlags control optional behavior of HTML renderer.
++type HTMLFlags int
++
++// HTML renderer configuration options.
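++// These are bit flags; combine them with '|', e.g. HrefTargetBlank|UseXHTML
++// for XHTML output whose links open in a new tab.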
++const (
++ HTMLFlagsNone HTMLFlags = 0
++ SkipHTML HTMLFlags = 1 << iota // Skip preformatted HTML blocks
++ SkipImages // Skip embedded images
++ SkipLinks // Skip all links
++ Safelink // Only link to trusted protocols
++ NofollowLinks // Only link with rel="nofollow"
++ NoreferrerLinks // Only link with rel="noreferrer"
++ NoopenerLinks // Only link with rel="noopener"
++ HrefTargetBlank // Add a blank target
++ CompletePage // Generate a complete HTML page
++ UseXHTML // Generate XHTML output instead of HTML
++ FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source
++ Smartypants // Enable smart punctuation substitutions
++ SmartypantsFractions // Enable smart fractions (with Smartypants)
++ SmartypantsDashes // Enable smart dashes (with Smartypants)
++ SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants)
++ SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering
++ SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants)
++ TOC // Generate a table of contents
++)
++
++var (
++ htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
++)
++
++const (
++ htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
++ processingInstruction + "|" + declaration + "|" + cdata + ")"
++ closeTag = "</" + tagName + "\\s*[>]"
++ openTag = "<" + tagName + attribute + "*" + "\\s*/?>"
++ attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
++ attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
++ attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
++ attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
++ cdata = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
++ declaration = "<![A-Z]+" + "\\s+[^>]*>"
++ doubleQuotedValue = "\"[^\"]*\""
++ htmlComment = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
++ processingInstruction = "[<][?].*?[?][>]"
++ singleQuotedValue = "'[^']*'"
++ tagName = "[A-Za-z][A-Za-z0-9-]*"
++ unquotedValue = "[^\"'=<>`\\x00-\\x20]+"
++)
++
++// HTMLRendererParameters is a collection of supplementary parameters tweaking
++// the behavior of various parts of HTML renderer.
++type HTMLRendererParameters struct {
++ // Prepend this text to each relative URL.
++ AbsolutePrefix string
++ // Add this text to each footnote anchor, to ensure uniqueness.
++ FootnoteAnchorPrefix string
++ // Show this text inside the <a> tag for a footnote return link, if the
++ // HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
++ // <sup>[return]</sup> is used.
++ FootnoteReturnLinkContents string
++ // If set, add this text to the front of each Heading ID, to ensure
++ // uniqueness.
++ HeadingIDPrefix string
++ // If set, add this text to the back of each Heading ID, to ensure uniqueness.
++ HeadingIDSuffix string
++ // Increase heading levels: if the offset is 1,
<h1> becomes <h2>
etc. ++ // Negative offset is also valid. ++ // Resulting levels are clipped between 1 and 6. ++ HeadingLevelOffset int ++ ++ Title string // Document title (used if CompletePage is set) ++ CSS string // Optional CSS file URL (used if CompletePage is set) ++ Icon string // Optional icon file URL (used if CompletePage is set) ++ ++ Flags HTMLFlags // Flags allow customizing this renderer's behavior ++} ++ ++// HTMLRenderer is a type that implements the Renderer interface for HTML output. ++// ++// Do not create this directly, instead use the NewHTMLRenderer function. ++type HTMLRenderer struct { ++ HTMLRendererParameters ++ ++ closeTag string // how to end singleton tags: either " />" or ">" ++ ++ // Track heading IDs to prevent ID collision in a single generation. ++ headingIDs map[string]int ++ ++ lastOutputLen int ++ disableTags int ++ ++ sr *SPRenderer ++} ++ ++const ( ++ xhtmlClose = " />" ++ htmlClose = ">" ++) ++ ++// NewHTMLRenderer creates and configures an HTMLRenderer object, which ++// satisfies the Renderer interface. ++func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer { ++ // configure the rendering engine ++ closeTag := htmlClose ++ if params.Flags&UseXHTML != 0 { ++ closeTag = xhtmlClose ++ } ++ ++ if params.FootnoteReturnLinkContents == "" { ++ // U+FE0E is VARIATION SELECTOR-15. ++ // It suppresses automatic emoji presentation of the preceding ++ // U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS. ++ params.FootnoteReturnLinkContents = "↩\ufe0e" ++ } ++ ++ return &HTMLRenderer{ ++ HTMLRendererParameters: params, ++ ++ closeTag: closeTag, ++ headingIDs: make(map[string]int), ++ ++ sr: NewSmartypantsRenderer(params.Flags), ++ } ++} ++ ++func isHTMLTag(tag []byte, tagname string) bool { ++ found, _ := findHTMLTagPos(tag, tagname) ++ return found ++} ++ ++// Look for a character, but ignore it when it's in any kind of quotes, it ++// might be JavaScript ++func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int { ++ inSingleQuote := false ++ inDoubleQuote := false ++ inGraveQuote := false ++ i := start ++ for i < len(html) { ++ switch { ++ case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote: ++ return i ++ case html[i] == '\'': ++ inSingleQuote = !inSingleQuote ++ case html[i] == '"': ++ inDoubleQuote = !inDoubleQuote ++ case html[i] == '`': ++ inGraveQuote = !inGraveQuote ++ } ++ i++ ++ } ++ return start ++} ++ ++func findHTMLTagPos(tag []byte, tagname string) (bool, int) { ++ i := 0 ++ if i < len(tag) && tag[0] != '<' { ++ return false, -1 ++ } ++ i++ ++ i = skipSpace(tag, i) ++ ++ if i < len(tag) && tag[i] == '/' { ++ i++ ++ } ++ ++ i = skipSpace(tag, i) ++ j := 0 ++ for ; i < len(tag); i, j = i+1, j+1 { ++ if j >= len(tagname) { ++ break ++ } ++ ++ if strings.ToLower(string(tag[i]))[0] != tagname[j] { ++ return false, -1 ++ } ++ } ++ ++ if i == len(tag) { ++ return false, -1 ++ } ++ ++ rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>') ++ if rightAngle >= i { ++ return true, rightAngle ++ } ++ ++ return false, -1 ++} ++ ++func skipSpace(tag []byte, i int) int { ++ for i < len(tag) && isspace(tag[i]) { ++ i++ ++ } ++ return i ++} ++ ++func isRelativeLink(link []byte) (yes bool) { ++ // a tag begin with '#' ++ if link[0] == '#' { ++ return true ++ } ++ ++ // link begin with '/' but not '//', the second maybe a protocol relative link ++ if len(link) >= 2 && link[0] == '/' && link[1] != '/' { ++ return true ++ } ++ ++ // only the root '/' ++ if len(link) == 1 && link[0] == '/' { ++ return true ++ } ++ ++ // current 
directory : begin with "./"
++	if bytes.HasPrefix(link, []byte("./")) {
++		return true
++	}
++
++	// parent directory : begin with "../"
++	if bytes.HasPrefix(link, []byte("../")) {
++		return true
++	}
++
++	return false
++}
++
++func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
++	for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
++		tmp := fmt.Sprintf("%s-%d", id, count+1)
++
++		if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
++			r.headingIDs[id] = count + 1
++			id = tmp
++		} else {
++			id = id + "-1"
++		}
++	}
++
++	if _, found := r.headingIDs[id]; !found {
++		r.headingIDs[id] = 0
++	}
++
++	return id
++}
++
++func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
++	if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
++		newDest := r.AbsolutePrefix
++		if link[0] != '/' {
++			newDest += "/"
++		}
++		newDest += string(link)
++		return []byte(newDest)
++	}
++	return link
++}
++
++func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
++	if isRelativeLink(link) {
++		return attrs
++	}
++	val := []string{}
++	if flags&NofollowLinks != 0 {
++		val = append(val, "nofollow")
++	}
++	if flags&NoreferrerLinks != 0 {
++		val = append(val, "noreferrer")
++	}
++	if flags&NoopenerLinks != 0 {
++		val = append(val, "noopener")
++	}
++	if flags&HrefTargetBlank != 0 {
++		attrs = append(attrs, "target=\"_blank\"")
++	}
++	if len(val) == 0 {
++		return attrs
++	}
++	attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
++	return append(attrs, attr)
++}
++
++func isMailto(link []byte) bool {
++	return bytes.HasPrefix(link, []byte("mailto:"))
++}
++
++func needSkipLink(flags HTMLFlags, dest []byte) bool {
++	if flags&SkipLinks != 0 {
++		return true
++	}
++	return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
++}
++
++func isSmartypantable(node *Node) bool {
++	pt := node.Parent.Type
++	return pt != Link && pt != CodeBlock && pt != Code
++}
++
++func appendLanguageAttr(attrs []string, info []byte) []string {
++	if len(info) == 0 {
++		return attrs
++	}
++	endOfLang := bytes.IndexAny(info, "\t ")
++	if endOfLang < 0 {
++		endOfLang = len(info)
++	}
++	return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
++}
++
++func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
++	w.Write(name)
++	if len(attrs) > 0 {
++		w.Write(spaceBytes)
++		w.Write([]byte(strings.Join(attrs, " ")))
++	}
++	w.Write(gtBytes)
++	r.lastOutputLen = 1
++}
++
++func footnoteRef(prefix string, node *Node) []byte {
++	urlFrag := prefix + string(slugify(node.Destination))
++	anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
++	return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
++}
++
++func footnoteItem(prefix string, slug []byte) []byte {
++	return []byte(fmt.Sprintf(`<li id="fn:%s%s">`, prefix, slug))
++}
++
++func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
++	const format = ` <a class="footnote-return" href="#fnref:%s%s">%s</a>`
++	return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
++}
++
++func itemOpenCR(node *Node) bool {
++	if node.Prev == nil {
++		return false
++	}
++	ld := node.Parent.ListData
++	return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
++}
++
++func skipParagraphTags(node *Node) bool {
++	grandparent := node.Parent.Parent
++	if grandparent == nil || grandparent.Type != List {
++		return false
++	}
++	tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
++	return grandparent.Type == List && tightOrTerm
++}
++
++func cellAlignment(align CellAlignFlags) string {
++	switch align {
++	case TableAlignmentLeft:
++		return "left"
++	case TableAlignmentRight:
++		return "right"
++	case TableAlignmentCenter:
++		return "center"
++	default:
++		return ""
++	}
++}
++
++func (r *HTMLRenderer) out(w io.Writer, text []byte) {
++	if r.disableTags > 0 {
++		w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
++	} else {
++		w.Write(text)
++	}
++	r.lastOutputLen = len(text)
++}
++
++func (r *HTMLRenderer) cr(w io.Writer) {
++	if r.lastOutputLen > 0 {
++		r.out(w, nlBytes)
++	}
++}
++
++var (
++	nlBytes    = []byte{'\n'}
++	gtBytes    = []byte{'>'}
++	spaceBytes = []byte{' '}
++)
++
++var (
++	brTag              = []byte("<br>")
++	brXHTMLTag         = []byte("<br />")
++	emTag              = []byte("<em>")
++	emCloseTag         = []byte("</em>")
++	strongTag          = []byte("<strong>")
++	strongCloseTag     = []byte("</strong>")
++	delTag             = []byte("<del>")
++	delCloseTag        = []byte("</del>")
++	ttTag              = []byte("<tt>")
++	ttCloseTag         = []byte("</tt>")
++	aTag               = []byte("<a")
++	aCloseTag          = []byte("</a>")
++	preTag             = []byte("<pre>")
++	preCloseTag        = []byte("</pre>")
++	codeTag            = []byte("<code>")
++	codeCloseTag       = []byte("</code>")
++	pTag               = []byte("<p>")
++	pCloseTag          = []byte("</p>")
++	blockquoteTag      = []byte("<blockquote>")
++	blockquoteCloseTag = []byte("</blockquote>")
++	hrTag              = []byte("<hr>")
++	hrXHTMLTag         = []byte("<hr />")
++	ulTag              = []byte("<ul>")
++	ulCloseTag         = []byte("</ul>")
++	olTag              = []byte("<ol>")
++	olCloseTag         = []byte("</ol>")
++	dlTag              = []byte("<dl>")
++	dlCloseTag         = []byte("</dl>")
++	liTag              = []byte("<li>")
++	liCloseTag         = []byte("</li>")
++	ddTag              = []byte("<dd>")
++	ddCloseTag         = []byte("</dd>")
++	dtTag              = []byte("<dt>")
++	dtCloseTag         = []byte("</dt>")
++	tableTag           = []byte("<table>")
++	tableCloseTag      = []byte("</table>")
++	tdTag              = []byte("<td")
++	tdCloseTag         = []byte("</td>")
++	thTag              = []byte("<th")
++	thCloseTag         = []byte("</th>")
++	theadTag           = []byte("<thead>")
++	theadCloseTag      = []byte("</thead>")
++	tbodyTag           = []byte("<tbody>")
++	tbodyCloseTag      = []byte("</tbody>")
++	trTag              = []byte("<tr>")
++	trCloseTag         = []byte("</tr>")
++	h1Tag              = []byte("<h1")
++	h1CloseTag         = []byte("</h1>")
++	h2Tag              = []byte("<h2")
++	h2CloseTag         = []byte("</h2>")
++	h3Tag              = []byte("<h3")
++	h3CloseTag         = []byte("</h3>")
++	h4Tag              = []byte("<h4")
++	h4CloseTag         = []byte("</h4>")
++	h5Tag              = []byte("<h5")
++	h5CloseTag         = []byte("</h5>")
++	h6Tag              = []byte("<h6")
++	h6CloseTag         = []byte("</h6>")
++
++	footnotesDivBytes      = []byte("\n<div class=\"footnotes\">\n\n")
++	footnotesCloseDivBytes = []byte("\n</div>\n")
++)
++
++func headingTagsFromLevel(level int) ([]byte, []byte) {
++	if level <= 1 {
++		return h1Tag, h1CloseTag
++	}
++	switch level {
++	case 2:
++		return h2Tag, h2CloseTag
++	case 3:
++		return h3Tag, h3CloseTag
++	case 4:
++		return h4Tag, h4CloseTag
++	case 5:
++		return h5Tag, h5CloseTag
++	}
++	return h6Tag, h6CloseTag
++}
++
++func (r *HTMLRenderer) outHRTag(w io.Writer) {
++	if r.Flags&UseXHTML == 0 {
++		r.out(w, hrTag)
++	} else {
++		r.out(w, hrXHTMLTag)
++	}
++}
++
++// RenderNode is a default renderer of a single node of a syntax tree. For
++// block nodes it will be called twice: first time with entering=true, second
++// time with entering=false, so that it could know when it's working on an open
++// tag and when on close. It writes the result to w.
++//
++// The return value is a way to tell the calling walker to adjust its walk
++// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
++// can ask the walker to skip a subtree of this node by returning SkipChildren.
++// The typical behavior is to return GoToNext, which asks for the usual
++// traversal to the next node.
++func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
++	attrs := []string{}
++	switch node.Type {
++	case Text:
++		if r.Flags&Smartypants != 0 {
++			var tmp bytes.Buffer
++			escapeHTML(&tmp, node.Literal)
++			r.sr.Process(w, tmp.Bytes())
++		} else {
++			if node.Parent.Type == Link {
++				escLink(w, node.Literal)
++			} else {
++				escapeHTML(w, node.Literal)
++			}
++		}
++	case Softbreak:
++		r.cr(w)
++		// TODO: make it configurable via out(renderer.softbreak)
++	case Hardbreak:
++		if r.Flags&UseXHTML == 0 {
++			r.out(w, brTag)
++		} else {
++			r.out(w, brXHTMLTag)
++		}
++		r.cr(w)
++	case Emph:
++		if entering {
++			r.out(w, emTag)
++		} else {
++			r.out(w, emCloseTag)
++		}
++	case Strong:
++		if entering {
++			r.out(w, strongTag)
++		} else {
++			r.out(w, strongCloseTag)
++		}
++	case Del:
++		if entering {
++			r.out(w, delTag)
++		} else {
++			r.out(w, delCloseTag)
++		}
++	case HTMLSpan:
++		if r.Flags&SkipHTML != 0 {
++			break
++		}
++		r.out(w, node.Literal)
++	case Link:
++		// mark it but don't link it if it is not a safe link: no smartypants
++		dest := node.LinkData.Destination
++		if needSkipLink(r.Flags, dest) {
++			if entering {
++				r.out(w, ttTag)
++			} else {
++				r.out(w, ttCloseTag)
++			}
++		} else {
++			if entering {
++				dest = r.addAbsPrefix(dest)
++				var hrefBuf bytes.Buffer
++				hrefBuf.WriteString("href=\"")
++				escLink(&hrefBuf, dest)
++				hrefBuf.WriteByte('"')
++				attrs = append(attrs, hrefBuf.String())
++				if node.NoteID != 0 {
++					r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
++					break
++				}
++				attrs = appendLinkAttrs(attrs, r.Flags, dest)
++				if len(node.LinkData.Title) > 0 {
++					var titleBuff bytes.Buffer
++					titleBuff.WriteString("title=\"")
++					escapeHTML(&titleBuff, node.LinkData.Title)
++					titleBuff.WriteByte('"')
++					attrs = append(attrs, titleBuff.String())
++				}
++				r.tag(w, aTag, attrs)
++			} else {
++				if node.NoteID != 0 {
++					break
++				}
++				r.out(w, aCloseTag)
++			}
++		}
++	case Image:
++		if r.Flags&SkipImages != 0 {
++			return SkipChildren
++		}
++		if entering {
++			dest := node.LinkData.Destination
++			dest = r.addAbsPrefix(dest)
++			if r.disableTags == 0 {
++				//if options.safe && potentiallyUnsafe(dest) {
++				//out(w, []byte(`<img src="" alt="`))
++				//} else {
++				r.out(w, []byte(`<img src="`))
++				escLink(w, dest)
++				r.out(w, []byte(`" alt="`))
++				//}
++			}
++			r.disableTags++
++		} else {
++			r.disableTags--
++			if r.disableTags == 0 {
++				if node.LinkData.Title != nil {
++					r.out(w, []byte(`" title="`))
++					escapeHTML(w, node.LinkData.Title)
++				}
++				r.out(w, []byte(`" />`))
++			}
++		}
++	case Code:
++		r.out(w, codeTag)
++		escapeAllHTML(w, node.Literal)
++		r.out(w, codeCloseTag)
++	case Document:
++		break
++	case Paragraph:
++		if skipParagraphTags(node) {
++			break
++		}
++		if entering {
++			// TODO: untangle this clusterfuck about when the newlines need
++			// to be added and when not.
++			if node.Prev != nil {
++				switch node.Prev.Type {
++				case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule:
++					r.cr(w)
++				}
++			}
++			if node.Parent.Type == BlockQuote && node.Prev == nil {
++				r.cr(w)
++			}
++			r.out(w, pTag)
++		} else {
++			r.out(w, pCloseTag)
++			if !(node.Parent.Type == Item && node.Next == nil) {
++				r.cr(w)
++			}
++		}
++	case BlockQuote:
++		if entering {
++			r.cr(w)
++			r.out(w, blockquoteTag)
++		} else {
++			r.out(w, blockquoteCloseTag)
++			r.cr(w)
++		}
++	case HTMLBlock:
++		if r.Flags&SkipHTML != 0 {
++			break
++		}
++		r.cr(w)
++		r.out(w, node.Literal)
++		r.cr(w)
++	case Heading:
++		headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level
++		openTag, closeTag := headingTagsFromLevel(headingLevel)
++		if entering {
++			if node.IsTitleblock {
++				attrs = append(attrs, `class="title"`)
++			}
++			if node.HeadingID != "" {
++				id := r.ensureUniqueHeadingID(node.HeadingID)
++				if r.HeadingIDPrefix != "" {
++					id = r.HeadingIDPrefix + id
++				}
++				if r.HeadingIDSuffix != "" {
++					id = id + r.HeadingIDSuffix
++				}
++				attrs = append(attrs, fmt.Sprintf(`id="%s"`, id))
++			}
++			r.cr(w)
++			r.tag(w, openTag, attrs)
++		} else {
++			r.out(w, closeTag)
++			if !(node.Parent.Type == Item && node.Next == nil) {
++				r.cr(w)
++			}
++		}
++	case HorizontalRule:
++		r.cr(w)
++		r.outHRTag(w)
++		r.cr(w)
++	case List:
++		openTag := ulTag
++		closeTag := ulCloseTag
++		if node.ListFlags&ListTypeOrdered != 0 {
++			openTag = olTag
++			closeTag = olCloseTag
++		}
++		if node.ListFlags&ListTypeDefinition != 0 {
++			openTag = dlTag
++			closeTag = dlCloseTag
++		}
++		if entering {
++			if node.IsFootnotesList {
++				r.out(w, footnotesDivBytes)
++				r.outHRTag(w)
++				r.cr(w)
++			}
++			r.cr(w)
++			if node.Parent.Type == Item && node.Parent.Parent.Tight {
++				r.cr(w)
++			}
++			r.tag(w, openTag[:len(openTag)-1], attrs)
++			r.cr(w)
++		} else {
++			r.out(w, closeTag)
++			//cr(w)
++			//if node.parent.Type != Item {
++			//	cr(w)
++			//}
++			if node.Parent.Type == Item && node.Next != nil {
++				r.cr(w)
++			}
++			if node.Parent.Type == Document || node.Parent.Type == BlockQuote {
++				r.cr(w)
++			}
++			if node.IsFootnotesList {
++				r.out(w, footnotesCloseDivBytes)
++			}
++		}
++	case Item:
++		openTag := liTag
++		closeTag := liCloseTag
++		if node.ListFlags&ListTypeDefinition != 0 {
++			openTag = ddTag
++			closeTag = ddCloseTag
++		}
++		if node.ListFlags&ListTypeTerm != 0 {
++			openTag = dtTag
++			closeTag = dtCloseTag
++		}
++		if entering {
++			if itemOpenCR(node) {
++				r.cr(w)
++			}
++			if node.ListData.RefLink != nil {
++				slug := slugify(node.ListData.RefLink)
++				r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug))
++				break
++			}
++			r.out(w, openTag)
++		} else {
++			if node.ListData.RefLink != nil {
++				slug := slugify(node.ListData.RefLink)
++				if r.Flags&FootnoteReturnLinks != 0 {
++					r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug))
++				}
++			}
++			r.out(w, closeTag)
++			r.cr(w)
++		}
++	case CodeBlock:
++		attrs = appendLanguageAttr(attrs, node.Info)
++		r.cr(w)
++		r.out(w, preTag)
++		r.tag(w, codeTag[:len(codeTag)-1], attrs)
++		escapeAllHTML(w, node.Literal)
++		r.out(w, codeCloseTag)
++		r.out(w, preCloseTag)
++		if node.Parent.Type != Item {
++			r.cr(w)
++		}
++	case Table:
++		if entering {
++			r.cr(w)
++			r.out(w, tableTag)
++		} else {
++			r.out(w, tableCloseTag)
++			r.cr(w)
++		}
++	case TableCell:
++		openTag := tdTag
++		closeTag := tdCloseTag
++		if node.IsHeader {
++			openTag = thTag
++			closeTag = thCloseTag
++		}
++		if entering {
++			align := cellAlignment(node.Align)
++			if align != "" {
++				attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
++			}
++			if node.Prev == nil {
++				r.cr(w)
++			}
++			r.tag(w, openTag, attrs)
++		} else {
++			r.out(w, closeTag)
++			r.cr(w)
++		}
++	case TableHead:
++		if entering {
++			r.cr(w)
++			r.out(w, theadTag)
++		} else {
++			r.out(w, theadCloseTag)
++			r.cr(w)
++		}
++	case TableBody:
++		if entering {
++			r.cr(w)
++			r.out(w, tbodyTag)
++			// XXX: this is to adhere to a rather silly test. Should fix test.
++			if node.FirstChild == nil {
++				r.cr(w)
++			}
++		} else {
++			r.out(w, tbodyCloseTag)
++			r.cr(w)
++		}
++	case TableRow:
++		if entering {
++			r.cr(w)
++			r.out(w, trTag)
++		} else {
++			r.out(w, trCloseTag)
++			r.cr(w)
++		}
++	default:
++		panic("Unknown node type " + node.Type.String())
++	}
++	return GoToNext
++}
++
++// RenderHeader writes HTML document preamble and TOC if requested.
++func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
++	r.writeDocumentHeader(w)
++	if r.Flags&TOC != 0 {
++		r.writeTOC(w, ast)
++	}
++}
++
++// RenderFooter writes HTML document footer.
++func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
++	if r.Flags&CompletePage == 0 {
++		return
++	}
++	io.WriteString(w, "\n</body>\n</html>\n")
++}
++
++func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) {
++	if r.Flags&CompletePage == 0 {
++		return
++	}
++	ending := ""
++	if r.Flags&UseXHTML != 0 {
++		io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
++		io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
++		io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
++		ending = " /"
++	} else {
++		io.WriteString(w, "<!DOCTYPE html>\n")
++		io.WriteString(w, "<html>\n")
++	}
++	io.WriteString(w, "<head>\n")
++	io.WriteString(w, "  <title>")
++	if r.Flags&Smartypants != 0 {
++		r.sr.Process(w, []byte(r.Title))
++	} else {
++		escapeHTML(w, []byte(r.Title))
++	}
++	io.WriteString(w, "</title>\n")
++	io.WriteString(w, "  <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
++	io.WriteString(w, Version)
++	io.WriteString(w, "\"")
++	io.WriteString(w, ending)
++	io.WriteString(w, ">\n")
++	io.WriteString(w, "  <meta charset=\"utf-8\"")
++	io.WriteString(w, ending)
++	io.WriteString(w, ">\n")
++	if r.CSS != "" {
++		io.WriteString(w, "  <link rel=\"stylesheet\" type=\"text/css\" href=\"")
++		escapeHTML(w, []byte(r.CSS))
++		io.WriteString(w, "\"")
++		io.WriteString(w, ending)
++		io.WriteString(w, ">\n")
++	}
++	if r.Icon != "" {
++		io.WriteString(w, "  <link rel=\"icon\" type=\"image/x-icon\" href=\"")
++		escapeHTML(w, []byte(r.Icon))
++		io.WriteString(w, "\"")
++		io.WriteString(w, ending)
++		io.WriteString(w, ">\n")
++	}
++	io.WriteString(w, "</head>\n")
++	io.WriteString(w, "<body>\n\n")
++}
++
++func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) {
++	buf := bytes.Buffer{}
++
++	inHeading := false
++	tocLevel := 0
++	headingCount := 0
++
++	ast.Walk(func(node *Node, entering bool) WalkStatus {
++		if node.Type == Heading && !node.HeadingData.IsTitleblock {
++			inHeading = entering
++			if entering {
++				node.HeadingID = fmt.Sprintf("toc_%d", headingCount)
++				if node.Level == tocLevel {
++					buf.WriteString("</li>\n\n<li>")
++				} else if node.Level < tocLevel {
++					for node.Level < tocLevel {
++						tocLevel--
++						buf.WriteString("</li>\n</ul>")
++					}
++					buf.WriteString("</li>\n\n<li>")
++				} else {
++					for node.Level > tocLevel {
++						tocLevel++
++						buf.WriteString("<ul>\n<li>")
++					}
++				}
++
++				fmt.Fprintf(&buf, `<a href="#toc_%d">`, headingCount)
++				headingCount++
++			} else {
++				buf.WriteString("</a>")
++			}
++			return GoToNext
++		}
++
++		if inHeading {
++			return r.RenderNode(&buf, node, entering)
++		}
++
++		return GoToNext
++	})
++
++	for ; tocLevel > 0; tocLevel-- {
++		buf.WriteString("</li>\n</ul>")
++	}
++
++	if buf.Len() > 0 {
++		io.WriteString(w, "<nav>\n")
++		w.Write(buf.Bytes())
++		io.WriteString(w, "\n\n</nav>\n")
++	}
++	r.lastOutputLen = buf.Len()
++}
+diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go
+new file mode 100644
+index 000000000000..d45bd941726e
+--- /dev/null
++++ b/vendor/github.com/russross/blackfriday/v2/inline.go
+@@ -0,0 +1,1228 @@
++//
++// Blackfriday Markdown Processor
++// Available at http://github.com/russross/blackfriday
++//
++// Copyright © 2011 Russ Ross <russ@russross.com>.
++// Distributed under the Simplified BSD License.
++// See README.md for details.
++//
++
++//
++// Functions to parse inline elements.
++//
++
++package blackfriday
++
++import (
++	"bytes"
++	"regexp"
++	"strconv"
++)
++
++var (
++	urlRe    = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
++	anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)
++
++	// https://www.w3.org/TR/html5/syntax.html#character-references
++	// highest unicode code point in 17 planes (2^20): 1,114,112d =
++	// 7 dec digits or 6 hex digits
++	// named entity references can be 2-31 characters with stuff like &lt;
++	// at one end and &CounterClockwiseContourIntegral; at the other. There
++	// are also sometimes numbers at the end, although this isn't inherent
++	// in the specification; there are never numbers anywhere else in
++	// current character references, though; see &frac34; and &blk12;, etc.
++	// https://www.w3.org/TR/html5/syntax.html#named-character-references
++	//
++	// entity := "&" (named group | number ref) ";"
++	// named group := [a-zA-Z]{2,31}[0-9]{0,2}
++	// number ref := "#" (dec ref | hex ref)
++	// dec ref := [0-9]{1,7}
++	// hex ref := ("x" | "X") [0-9a-fA-F]{1,6}
++	htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`)
++)
++
++// Functions to parse text within a block
++// Each function returns the number of chars taken care of
++// data is the complete block being rendered
++// offset is the number of valid chars before the current cursor
++
++func (p *Markdown) inline(currBlock *Node, data []byte) {
++	// handlers might call us recursively: enforce a maximum depth
++	if p.nesting >= p.maxNesting || len(data) == 0 {
++		return
++	}
++	p.nesting++
++	beg, end := 0, 0
++	for end < len(data) {
++		handler := p.inlineCallback[data[end]]
++		if handler != nil {
++			if consumed, node := handler(p, data, end); consumed == 0 {
++				// No action from the callback.
++				end++
++			} else {
++				// Copy inactive chars into the output.
++				currBlock.AppendChild(text(data[beg:end]))
++				if node != nil {
++					currBlock.AppendChild(node)
++				}
++				// Skip past whatever the callback used.
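++				// e.g. for "a *b* c" the '*' handler consumes the whole "*b*"
++				// span, so beg/end jump past it and scanning resumes at " c"
++				// (an illustrative trace).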
++ beg = end + consumed ++ end = beg ++ } ++ } else { ++ end++ ++ } ++ } ++ if beg < len(data) { ++ if data[end-1] == '\n' { ++ end-- ++ } ++ currBlock.AppendChild(text(data[beg:end])) ++ } ++ p.nesting-- ++} ++ ++// single and double emphasis parsing ++func emphasis(p *Markdown, data []byte, offset int) (int, *Node) { ++ data = data[offset:] ++ c := data[0] ++ ++ if len(data) > 2 && data[1] != c { ++ // whitespace cannot follow an opening emphasis; ++ // strikethrough only takes two characters '~~' ++ if c == '~' || isspace(data[1]) { ++ return 0, nil ++ } ++ ret, node := helperEmphasis(p, data[1:], c) ++ if ret == 0 { ++ return 0, nil ++ } ++ ++ return ret + 1, node ++ } ++ ++ if len(data) > 3 && data[1] == c && data[2] != c { ++ if isspace(data[2]) { ++ return 0, nil ++ } ++ ret, node := helperDoubleEmphasis(p, data[2:], c) ++ if ret == 0 { ++ return 0, nil ++ } ++ ++ return ret + 2, node ++ } ++ ++ if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c { ++ if c == '~' || isspace(data[3]) { ++ return 0, nil ++ } ++ ret, node := helperTripleEmphasis(p, data, 3, c) ++ if ret == 0 { ++ return 0, nil ++ } ++ ++ return ret + 3, node ++ } ++ ++ return 0, nil ++} ++ ++func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) { ++ data = data[offset:] ++ ++ nb := 0 ++ ++ // count the number of backticks in the delimiter ++ for nb < len(data) && data[nb] == '`' { ++ nb++ ++ } ++ ++ // find the next delimiter ++ i, end := 0, 0 ++ for end = nb; end < len(data) && i < nb; end++ { ++ if data[end] == '`' { ++ i++ ++ } else { ++ i = 0 ++ } ++ } ++ ++ // no matching delimiter? ++ if i < nb && end >= len(data) { ++ return 0, nil ++ } ++ ++ // trim outside whitespace ++ fBegin := nb ++ for fBegin < end && data[fBegin] == ' ' { ++ fBegin++ ++ } ++ ++ fEnd := end - nb ++ for fEnd > fBegin && data[fEnd-1] == ' ' { ++ fEnd-- ++ } ++ ++ // render the code span ++ if fBegin != fEnd { ++ code := NewNode(Code) ++ code.Literal = data[fBegin:fEnd] ++ return end, code ++ } ++ ++ return end, nil ++} ++ ++// newline preceded by two spaces becomes
++// <br>
    ++func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { ++ origOffset := offset ++ for offset < len(data) && data[offset] == ' ' { ++ offset++ ++ } ++ ++ if offset < len(data) && data[offset] == '\n' { ++ if offset-origOffset >= 2 { ++ return offset - origOffset + 1, NewNode(Hardbreak) ++ } ++ return offset - origOffset, nil ++ } ++ return 0, nil ++} ++ ++// newline without two spaces works when HardLineBreak is enabled ++func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { ++ if p.extensions&HardLineBreak != 0 { ++ return 1, NewNode(Hardbreak) ++ } ++ return 0, nil ++} ++ ++type linkType int ++ ++const ( ++ linkNormal linkType = iota ++ linkImg ++ linkDeferredFootnote ++ linkInlineFootnote ++) ++ ++func isReferenceStyleLink(data []byte, pos int, t linkType) bool { ++ if t == linkDeferredFootnote { ++ return false ++ } ++ return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' ++} ++ ++func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { ++ if offset < len(data)-1 && data[offset+1] == '[' { ++ return link(p, data, offset) ++ } ++ return 0, nil ++} ++ ++func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { ++ if offset < len(data)-1 && data[offset+1] == '[' { ++ return link(p, data, offset) ++ } ++ return 0, nil ++} ++ ++// '[': parse a link or an image or a footnote ++func link(p *Markdown, data []byte, offset int) (int, *Node) { ++ // no links allowed inside regular links, footnote, and deferred footnotes ++ if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { ++ return 0, nil ++ } ++ ++ var t linkType ++ switch { ++ // special case: ![^text] == deferred footnote (that follows something with ++ // an exclamation point) ++ case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': ++ t = linkDeferredFootnote ++ // ![alt] == image ++ case offset >= 0 && data[offset] == '!': ++ t = linkImg ++ offset++ ++ // ^[text] == inline footnote ++ // [^refId] == deferred footnote ++ case p.extensions&Footnotes != 0: ++ if offset >= 0 && data[offset] == '^' { ++ t = linkInlineFootnote ++ offset++ ++ } else if len(data)-1 > offset && data[offset+1] == '^' { ++ t = linkDeferredFootnote ++ } ++ // [text] == regular link ++ default: ++ t = linkNormal ++ } ++ ++ data = data[offset:] ++ ++ var ( ++ i = 1 ++ noteID int ++ title, link, altContent []byte ++ textHasNl = false ++ ) ++ ++ if t == linkDeferredFootnote { ++ i++ ++ } ++ ++ // look for the matching closing bracket ++ for level := 1; level > 0 && i < len(data); i++ { ++ switch { ++ case data[i] == '\n': ++ textHasNl = true ++ ++ case isBackslashEscaped(data, i): ++ continue ++ ++ case data[i] == '[': ++ level++ ++ ++ case data[i] == ']': ++ level-- ++ if level <= 0 { ++ i-- // compensate for extra i++ in for loop ++ } ++ } ++ } ++ ++ if i >= len(data) { ++ return 0, nil ++ } ++ ++ txtE := i ++ i++ ++ var footnoteNode *Node ++ ++ // skip any amount of whitespace or newline ++ // (this is much more lax than original markdown syntax) ++ for i < len(data) && isspace(data[i]) { ++ i++ ++ } ++ ++ // inline style link ++ switch { ++ case i < len(data) && data[i] == '(': ++ // skip initial whitespace ++ i++ ++ ++ for i < len(data) && isspace(data[i]) { ++ i++ ++ } ++ ++ linkB := i ++ ++ // look for link end: ' " ) ++ findlinkend: ++ for i < len(data) { ++ switch { ++ case data[i] == '\\': ++ i += 2 ++ ++ case data[i] == ')' || data[i] == '\'' || data[i] == '"': ++ break findlinkend ++ ++ default: 
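++				// any other byte is part of the destination; keep scanning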
++ i++ ++ } ++ } ++ ++ if i >= len(data) { ++ return 0, nil ++ } ++ linkE := i ++ ++ // look for title end if present ++ titleB, titleE := 0, 0 ++ if data[i] == '\'' || data[i] == '"' { ++ i++ ++ titleB = i ++ ++ findtitleend: ++ for i < len(data) { ++ switch { ++ case data[i] == '\\': ++ i += 2 ++ ++ case data[i] == ')': ++ break findtitleend ++ ++ default: ++ i++ ++ } ++ } ++ ++ if i >= len(data) { ++ return 0, nil ++ } ++ ++ // skip whitespace after title ++ titleE = i - 1 ++ for titleE > titleB && isspace(data[titleE]) { ++ titleE-- ++ } ++ ++ // check for closing quote presence ++ if data[titleE] != '\'' && data[titleE] != '"' { ++ titleB, titleE = 0, 0 ++ linkE = i ++ } ++ } ++ ++ // remove whitespace at the end of the link ++ for linkE > linkB && isspace(data[linkE-1]) { ++ linkE-- ++ } ++ ++ // remove optional angle brackets around the link ++ if data[linkB] == '<' { ++ linkB++ ++ } ++ if data[linkE-1] == '>' { ++ linkE-- ++ } ++ ++ // build escaped link and title ++ if linkE > linkB { ++ link = data[linkB:linkE] ++ } ++ ++ if titleE > titleB { ++ title = data[titleB:titleE] ++ } ++ ++ i++ ++ ++ // reference style link ++ case isReferenceStyleLink(data, i, t): ++ var id []byte ++ altContentConsidered := false ++ ++ // look for the id ++ i++ ++ linkB := i ++ for i < len(data) && data[i] != ']' { ++ i++ ++ } ++ if i >= len(data) { ++ return 0, nil ++ } ++ linkE := i ++ ++ // find the reference ++ if linkB == linkE { ++ if textHasNl { ++ var b bytes.Buffer ++ ++ for j := 1; j < txtE; j++ { ++ switch { ++ case data[j] != '\n': ++ b.WriteByte(data[j]) ++ case data[j-1] != ' ': ++ b.WriteByte(' ') ++ } ++ } ++ ++ id = b.Bytes() ++ } else { ++ id = data[1:txtE] ++ altContentConsidered = true ++ } ++ } else { ++ id = data[linkB:linkE] ++ } ++ ++ // find the reference with matching id ++ lr, ok := p.getRef(string(id)) ++ if !ok { ++ return 0, nil ++ } ++ ++ // keep link and title from reference ++ link = lr.link ++ title = lr.title ++ if altContentConsidered { ++ altContent = lr.text ++ } ++ i++ ++ ++ // shortcut reference style link or reference or inline footnote ++ default: ++ var id []byte ++ ++ // craft the id ++ if textHasNl { ++ var b bytes.Buffer ++ ++ for j := 1; j < txtE; j++ { ++ switch { ++ case data[j] != '\n': ++ b.WriteByte(data[j]) ++ case data[j-1] != ' ': ++ b.WriteByte(' ') ++ } ++ } ++ ++ id = b.Bytes() ++ } else { ++ if t == linkDeferredFootnote { ++ id = data[2:txtE] // get rid of the ^ ++ } else { ++ id = data[1:txtE] ++ } ++ } ++ ++ footnoteNode = NewNode(Item) ++ if t == linkInlineFootnote { ++ // create a new reference ++ noteID = len(p.notes) + 1 ++ ++ var fragment []byte ++ if len(id) > 0 { ++ if len(id) < 16 { ++ fragment = make([]byte, len(id)) ++ } else { ++ fragment = make([]byte, 16) ++ } ++ copy(fragment, slugify(id)) ++ } else { ++ fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
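++				// e.g. an inline footnote with empty text and noteID 3 falls
++				// back to the generated fragment "footnote-3"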
++ } ++ ++ ref := &reference{ ++ noteID: noteID, ++ hasBlock: false, ++ link: fragment, ++ title: id, ++ footnote: footnoteNode, ++ } ++ ++ p.notes = append(p.notes, ref) ++ ++ link = ref.link ++ title = ref.title ++ } else { ++ // find the reference with matching id ++ lr, ok := p.getRef(string(id)) ++ if !ok { ++ return 0, nil ++ } ++ ++ if t == linkDeferredFootnote { ++ lr.noteID = len(p.notes) + 1 ++ lr.footnote = footnoteNode ++ p.notes = append(p.notes, lr) ++ } ++ ++ // keep link and title from reference ++ link = lr.link ++ // if inline footnote, title == footnote contents ++ title = lr.title ++ noteID = lr.noteID ++ } ++ ++ // rewind the whitespace ++ i = txtE + 1 ++ } ++ ++ var uLink []byte ++ if t == linkNormal || t == linkImg { ++ if len(link) > 0 { ++ var uLinkBuf bytes.Buffer ++ unescapeText(&uLinkBuf, link) ++ uLink = uLinkBuf.Bytes() ++ } ++ ++ // links need something to click on and somewhere to go ++ if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { ++ return 0, nil ++ } ++ } ++ ++ // call the relevant rendering function ++ var linkNode *Node ++ switch t { ++ case linkNormal: ++ linkNode = NewNode(Link) ++ linkNode.Destination = normalizeURI(uLink) ++ linkNode.Title = title ++ if len(altContent) > 0 { ++ linkNode.AppendChild(text(altContent)) ++ } else { ++ // links cannot contain other links, so turn off link parsing ++ // temporarily and recurse ++ insideLink := p.insideLink ++ p.insideLink = true ++ p.inline(linkNode, data[1:txtE]) ++ p.insideLink = insideLink ++ } ++ ++ case linkImg: ++ linkNode = NewNode(Image) ++ linkNode.Destination = uLink ++ linkNode.Title = title ++ linkNode.AppendChild(text(data[1:txtE])) ++ i++ ++ ++ case linkInlineFootnote, linkDeferredFootnote: ++ linkNode = NewNode(Link) ++ linkNode.Destination = link ++ linkNode.Title = title ++ linkNode.NoteID = noteID ++ linkNode.Footnote = footnoteNode ++ if t == linkInlineFootnote { ++ i++ ++ } ++ ++ default: ++ return 0, nil ++ } ++ ++ return i, linkNode ++} ++ ++func (p *Markdown) inlineHTMLComment(data []byte) int { ++ if len(data) < 5 { ++ return 0 ++ } ++ if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { ++ return 0 ++ } ++ i := 5 ++ // scan for an end-of-comment marker, across lines if necessary ++ for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { ++ i++ ++ } ++ // no end-of-comment marker ++ if i >= len(data) { ++ return 0 ++ } ++ return i + 1 ++} ++ ++func stripMailto(link []byte) []byte { ++ if bytes.HasPrefix(link, []byte("mailto://")) { ++ return link[9:] ++ } else if bytes.HasPrefix(link, []byte("mailto:")) { ++ return link[7:] ++ } else { ++ return link ++ } ++} ++ ++// autolinkType specifies a kind of autolink that gets detected. ++type autolinkType int ++ ++// These are the possible flag values for the autolink renderer. ++const ( ++ notAutolink autolinkType = iota ++ normalAutolink ++ emailAutolink ++) ++ ++// '<' when tags or autolinks are allowed ++func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { ++ data = data[offset:] ++ altype, end := tagLength(data) ++ if size := p.inlineHTMLComment(data); size > 0 { ++ end = size ++ } ++ if end > 2 { ++ if altype != notAutolink { ++ var uLink bytes.Buffer ++ unescapeText(&uLink, data[1:end+1-2]) ++ if uLink.Len() > 0 { ++ link := uLink.Bytes() ++ node := NewNode(Link) ++ node.Destination = link ++ if altype == emailAutolink { ++ node.Destination = append([]byte("mailto:"), link...) 
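++					// a bare <user@example.com> autolink gets a "mailto:"
++					// destination; stripMailto keeps the visible text bare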
++ } ++ node.AppendChild(text(stripMailto(link))) ++ return end, node ++ } ++ } else { ++ htmlTag := NewNode(HTMLSpan) ++ htmlTag.Literal = data[:end] ++ return end, htmlTag ++ } ++ } ++ ++ return end, nil ++} ++ ++// '\\' backslash escape ++var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") ++ ++func escape(p *Markdown, data []byte, offset int) (int, *Node) { ++ data = data[offset:] ++ ++ if len(data) > 1 { ++ if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { ++ return 2, NewNode(Hardbreak) ++ } ++ if bytes.IndexByte(escapeChars, data[1]) < 0 { ++ return 0, nil ++ } ++ ++ return 2, text(data[1:2]) ++ } ++ ++ return 2, nil ++} ++ ++func unescapeText(ob *bytes.Buffer, src []byte) { ++ i := 0 ++ for i < len(src) { ++ org := i ++ for i < len(src) && src[i] != '\\' { ++ i++ ++ } ++ ++ if i > org { ++ ob.Write(src[org:i]) ++ } ++ ++ if i+1 >= len(src) { ++ break ++ } ++ ++ ob.WriteByte(src[i+1]) ++ i += 2 ++ } ++} ++ ++// '&' escaped when it doesn't belong to an entity ++// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; ++func entity(p *Markdown, data []byte, offset int) (int, *Node) { ++ data = data[offset:] ++ ++ end := 1 ++ ++ if end < len(data) && data[end] == '#' { ++ end++ ++ } ++ ++ for end < len(data) && isalnum(data[end]) { ++ end++ ++ } ++ ++ if end < len(data) && data[end] == ';' { ++ end++ // real entity ++ } else { ++ return 0, nil // lone '&' ++ } ++ ++ ent := data[:end] ++ // undo & escaping or it will be converted to &amp; by another ++ // escaper in the renderer ++ if bytes.Equal(ent, []byte("&")) { ++ ent = []byte{'&'} ++ } ++ ++ return end, text(ent) ++} ++ ++func linkEndsWithEntity(data []byte, linkEnd int) bool { ++ entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) ++ return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd ++} ++ ++// hasPrefixCaseInsensitive is a custom implementation of ++// strings.HasPrefix(strings.ToLower(s), prefix) ++// we rolled our own because ToLower pulls in a huge machinery of lowercasing ++// anything from Unicode and that's very slow. Since this func will only be ++// used on ASCII protocol prefixes, we can take shortcuts. 
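++// For example (illustrative): hasPrefixCaseInsensitive([]byte("FTP://x"), []byte("ftp://")) is true.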
++func hasPrefixCaseInsensitive(s, prefix []byte) bool { ++ if len(s) < len(prefix) { ++ return false ++ } ++ delta := byte('a' - 'A') ++ for i, b := range prefix { ++ if b != s[i] && b != s[i]+delta { ++ return false ++ } ++ } ++ return true ++} ++ ++var protocolPrefixes = [][]byte{ ++ []byte("http://"), ++ []byte("https://"), ++ []byte("ftp://"), ++ []byte("file://"), ++ []byte("mailto:"), ++} ++ ++const shortestPrefix = 6 // len("ftp://"), the shortest of the above ++ ++func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { ++ // quick check to rule out most false hits ++ if p.insideLink || len(data) < offset+shortestPrefix { ++ return 0, nil ++ } ++ for _, prefix := range protocolPrefixes { ++ endOfHead := offset + 8 // 8 is the len() of the longest prefix ++ if endOfHead > len(data) { ++ endOfHead = len(data) ++ } ++ if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { ++ return autoLink(p, data, offset) ++ } ++ } ++ return 0, nil ++} ++ ++func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { ++ // Now a more expensive check to see if we're not inside an anchor element ++ anchorStart := offset ++ offsetFromAnchor := 0 ++ for anchorStart > 0 && data[anchorStart] != '<' { ++ anchorStart-- ++ offsetFromAnchor++ ++ } ++ ++ anchorStr := anchorRe.Find(data[anchorStart:]) ++ if anchorStr != nil { ++ anchorClose := NewNode(HTMLSpan) ++ anchorClose.Literal = anchorStr[offsetFromAnchor:] ++ return len(anchorStr) - offsetFromAnchor, anchorClose ++ } ++ ++ // scan backward for a word boundary ++ rewind := 0 ++ for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { ++ rewind++ ++ } ++ if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters ++ return 0, nil ++ } ++ ++ origData := data ++ data = data[offset-rewind:] ++ ++ if !isSafeLink(data) { ++ return 0, nil ++ } ++ ++ linkEnd := 0 ++ for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { ++ linkEnd++ ++ } ++ ++ // Skip punctuation at the end of the link ++ if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { ++ linkEnd-- ++ } ++ ++ // But don't skip semicolon if it's a part of escaped entity: ++ if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { ++ linkEnd-- ++ } ++ ++ // See if the link finishes with a punctuation sign that can be closed. ++ var copen byte ++ switch data[linkEnd-1] { ++ case '"': ++ copen = '"' ++ case '\'': ++ copen = '\'' ++ case ')': ++ copen = '(' ++ case ']': ++ copen = '[' ++ case '}': ++ copen = '{' ++ default: ++ copen = 0 ++ } ++ ++ if copen != 0 { ++ bufEnd := offset - rewind + linkEnd - 2 ++ ++ openDelim := 1 ++ ++ /* Try to close the final punctuation sign in this same line; ++ * if we managed to close it outside of the URL, that means that it's ++ * not part of the URL. If it closes inside the URL, that means it ++ * is part of the URL. 
++ * ++ * Examples: ++ * ++ * foo http://www.pokemon.com/Pikachu_(Electric) bar ++ * => http://www.pokemon.com/Pikachu_(Electric) ++ * ++ * foo (http://www.pokemon.com/Pikachu_(Electric)) bar ++ * => http://www.pokemon.com/Pikachu_(Electric) ++ * ++ * foo http://www.pokemon.com/Pikachu_(Electric)) bar ++ * => http://www.pokemon.com/Pikachu_(Electric)) ++ * ++ * (foo http://www.pokemon.com/Pikachu_(Electric)) bar ++ * => foo http://www.pokemon.com/Pikachu_(Electric) ++ */ ++ ++ for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { ++ if origData[bufEnd] == data[linkEnd-1] { ++ openDelim++ ++ } ++ ++ if origData[bufEnd] == copen { ++ openDelim-- ++ } ++ ++ bufEnd-- ++ } ++ ++ if openDelim == 0 { ++ linkEnd-- ++ } ++ } ++ ++ var uLink bytes.Buffer ++ unescapeText(&uLink, data[:linkEnd]) ++ ++ if uLink.Len() > 0 { ++ node := NewNode(Link) ++ node.Destination = uLink.Bytes() ++ node.AppendChild(text(uLink.Bytes())) ++ return linkEnd, node ++ } ++ ++ return linkEnd, nil ++} ++ ++func isEndOfLink(char byte) bool { ++ return isspace(char) || char == '<' ++} ++ ++var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} ++var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} ++ ++func isSafeLink(link []byte) bool { ++ for _, path := range validPaths { ++ if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { ++ if len(link) == len(path) { ++ return true ++ } else if isalnum(link[len(path)]) { ++ return true ++ } ++ } ++ } ++ ++ for _, prefix := range validUris { ++ // TODO: handle unicode here ++ // case-insensitive prefix test ++ if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { ++ return true ++ } ++ } ++ ++ return false ++} ++ ++// return the length of the given tag, or 0 is it's not valid ++func tagLength(data []byte) (autolink autolinkType, end int) { ++ var i, j int ++ ++ // a valid tag can't be shorter than 3 chars ++ if len(data) < 3 { ++ return notAutolink, 0 ++ } ++ ++ // begins with a '<' optionally followed by '/', followed by letter or number ++ if data[0] != '<' { ++ return notAutolink, 0 ++ } ++ if data[1] == '/' { ++ i = 2 ++ } else { ++ i = 1 ++ } ++ ++ if !isalnum(data[i]) { ++ return notAutolink, 0 ++ } ++ ++ // scheme test ++ autolink = notAutolink ++ ++ // try to find the beginning of an URI ++ for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { ++ i++ ++ } ++ ++ if i > 1 && i < len(data) && data[i] == '@' { ++ if j = isMailtoAutoLink(data[i:]); j != 0 { ++ return emailAutolink, i + j ++ } ++ } ++ ++ if i > 2 && i < len(data) && data[i] == ':' { ++ autolink = normalAutolink ++ i++ ++ } ++ ++ // complete autolink test: no whitespace or ' or " ++ switch { ++ case i >= len(data): ++ autolink = notAutolink ++ case autolink != notAutolink: ++ j = i ++ ++ for i < len(data) { ++ if data[i] == '\\' { ++ i += 2 ++ } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { ++ break ++ } else { ++ i++ ++ } ++ ++ } ++ ++ if i >= len(data) { ++ return autolink, 0 ++ } ++ if i > j && data[i] == '>' { ++ return autolink, i + 1 ++ } ++ ++ // one of the forbidden chars has been found ++ autolink = notAutolink ++ } ++ i += bytes.IndexByte(data[i:], '>') ++ if i < 0 { ++ return autolink, 0 ++ } ++ return autolink, i + 1 ++} ++ ++// look for the address part of a mail autolink and '>' ++// this is less strict than the original markdown e-mail address matching ++func isMailtoAutoLink(data []byte) int { ++ nb := 0 ++ ++ // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' ++ for i := 0; i < len(data); i++ { ++ if isalnum(data[i]) { ++ continue ++ } ++ ++ switch data[i] { ++ case '@': ++ nb++ ++ ++ case '-', '.', '_': ++ break ++ ++ case '>': ++ if nb == 1 { ++ return i + 1 ++ } ++ return 0 ++ default: ++ return 0 ++ } ++ } ++ ++ return 0 ++} ++ ++// look for the next emph char, skipping other constructs ++func helperFindEmphChar(data []byte, c byte) int { ++ i := 0 ++ ++ for i < len(data) { ++ for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { ++ i++ ++ } ++ if i >= len(data) { ++ return 0 ++ } ++ // do not count escaped chars ++ if i != 0 && data[i-1] == '\\' { ++ i++ ++ continue ++ } ++ if data[i] == c { ++ return i ++ } ++ ++ if data[i] == '`' { ++ // skip a code span ++ tmpI := 0 ++ i++ ++ for i < len(data) && data[i] != '`' { ++ if tmpI == 0 && data[i] == c { ++ tmpI = i ++ } ++ i++ ++ } ++ if i >= len(data) { ++ return tmpI ++ } ++ i++ ++ } else if data[i] == '[' { ++ // skip a link ++ tmpI := 0 ++ i++ ++ for i < len(data) && data[i] != ']' { ++ if tmpI == 0 && data[i] == c { ++ tmpI = i ++ } ++ i++ ++ } ++ i++ ++ for i < len(data) && (data[i] == ' ' || data[i] == '\n') { ++ i++ ++ } ++ if i >= len(data) { ++ return tmpI ++ } ++ if data[i] != '[' && data[i] != '(' { // not a link ++ if tmpI > 0 { ++ return tmpI ++ } ++ continue ++ } ++ cc := data[i] ++ i++ ++ for i < len(data) && data[i] != cc { ++ if tmpI == 0 && data[i] == c { ++ return i ++ } ++ i++ ++ } ++ if i >= len(data) { ++ return tmpI ++ } ++ i++ ++ } ++ } ++ return 0 ++} ++ ++func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { ++ i := 0 ++ ++ // skip one symbol if coming from emph3 ++ if len(data) > 1 && data[0] == c && data[1] == c { ++ i = 1 ++ } ++ ++ for i < len(data) { ++ length := helperFindEmphChar(data[i:], c) ++ if length == 0 { ++ return 0, nil ++ } ++ i += length ++ if i >= len(data) { ++ return 0, nil ++ } ++ ++ if i+1 < len(data) && data[i+1] == c { ++ i++ ++ continue ++ } ++ ++ if data[i] == c && !isspace(data[i-1]) { ++ ++ if p.extensions&NoIntraEmphasis != 0 { ++ if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { ++ continue ++ } ++ } ++ ++ emph := NewNode(Emph) ++ p.inline(emph, data[:i]) ++ return i + 1, emph ++ } ++ } ++ ++ return 0, nil ++} ++ ++func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { ++ i := 0 ++ ++ 
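++	// scan for the matching closing delimiter pair, e.g. the trailing
++	// "**" in "**bold**" or "~~" in "~~strike~~"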
for i < len(data) { ++ length := helperFindEmphChar(data[i:], c) ++ if length == 0 { ++ return 0, nil ++ } ++ i += length ++ ++ if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) { ++ nodeType := Strong ++ if c == '~' { ++ nodeType = Del ++ } ++ node := NewNode(nodeType) ++ p.inline(node, data[:i]) ++ return i + 2, node ++ } ++ i++ ++ } ++ return 0, nil ++} ++ ++func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { ++ i := 0 ++ origData := data ++ data = data[offset:] ++ ++ for i < len(data) { ++ length := helperFindEmphChar(data[i:], c) ++ if length == 0 { ++ return 0, nil ++ } ++ i += length ++ ++ // skip whitespace preceded symbols ++ if data[i] != c || isspace(data[i-1]) { ++ continue ++ } ++ ++ switch { ++ case i+2 < len(data) && data[i+1] == c && data[i+2] == c: ++ // triple symbol found ++ strong := NewNode(Strong) ++ em := NewNode(Emph) ++ strong.AppendChild(em) ++ p.inline(em, data[:i]) ++ return i + 3, strong ++ case (i+1 < len(data) && data[i+1] == c): ++ // double symbol found, hand over to emph1 ++ length, node := helperEmphasis(p, origData[offset-2:], c) ++ if length == 0 { ++ return 0, nil ++ } ++ return length - 2, node ++ default: ++ // single symbol found, hand over to emph2 ++ length, node := helperDoubleEmphasis(p, origData[offset-1:], c) ++ if length == 0 { ++ return 0, nil ++ } ++ return length - 1, node ++ } ++ } ++ return 0, nil ++} ++ ++func text(s []byte) *Node { ++ node := NewNode(Text) ++ node.Literal = s ++ return node ++} ++ ++func normalizeURI(s []byte) []byte { ++ return s // TODO: implement ++} +diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go +new file mode 100644 +index 000000000000..58d2e4538c62 +--- /dev/null ++++ b/vendor/github.com/russross/blackfriday/v2/markdown.go +@@ -0,0 +1,950 @@ ++// Blackfriday Markdown Processor ++// Available at http://github.com/russross/blackfriday ++// ++// Copyright © 2011 Russ Ross . ++// Distributed under the Simplified BSD License. ++// See README.md for details. ++ ++package blackfriday ++ ++import ( ++ "bytes" ++ "fmt" ++ "io" ++ "strings" ++ "unicode/utf8" ++) ++ ++// ++// Markdown parsing and processing ++// ++ ++// Version string of the package. Appears in the rendered document when ++// CompletePage flag is on. ++const Version = "2.0" ++ ++// Extensions is a bitwise or'ed collection of enabled Blackfriday's ++// extensions. ++type Extensions int ++ ++// These are the supported markdown parsing extensions. ++// OR these values together to select multiple extensions. 
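++// For example (illustrative), a parser limited to tables and fenced code:
++//
++//	p := New(WithExtensions(Tables | FencedCode))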
++const ( ++ NoExtensions Extensions = 0 ++ NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words ++ Tables // Render tables ++ FencedCode // Render fenced code blocks ++ Autolink // Detect embedded URLs that are not explicitly marked ++ Strikethrough // Strikethrough text using ~~test~~ ++ LaxHTMLBlocks // Loosen up HTML block parsing rules ++ SpaceHeadings // Be strict about prefix heading rules ++ HardLineBreak // Translate newlines into line breaks ++ TabSizeEight // Expand tabs to eight spaces instead of four ++ Footnotes // Pandoc-style footnotes ++ NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block ++ HeadingIDs // specify heading IDs with {#id} ++ Titleblock // Titleblock ala pandoc ++ AutoHeadingIDs // Create the heading ID from the text ++ BackslashLineBreak // Translate trailing backslashes into line breaks ++ DefinitionLists // Render definition lists ++ ++ CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | ++ SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes ++ ++ CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | ++ Autolink | Strikethrough | SpaceHeadings | HeadingIDs | ++ BackslashLineBreak | DefinitionLists ++) ++ ++// ListType contains bitwise or'ed flags for list and list item objects. ++type ListType int ++ ++// These are the possible flag values for the ListItem renderer. ++// Multiple flag values may be ORed together. ++// These are mostly of interest if you are writing a new output format. ++const ( ++ ListTypeOrdered ListType = 1 << iota ++ ListTypeDefinition ++ ListTypeTerm ++ ++ ListItemContainsBlock ++ ListItemBeginningOfList // TODO: figure out if this is of any use now ++ ListItemEndOfList ++) ++ ++// CellAlignFlags holds a type of alignment in a table cell. ++type CellAlignFlags int ++ ++// These are the possible flag values for the table cell renderer. ++// Only a single one of these values will be used; they are not ORed together. ++// These are mostly of interest if you are writing a new output format. ++const ( ++ TableAlignmentLeft CellAlignFlags = 1 << iota ++ TableAlignmentRight ++ TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) ++) ++ ++// The size of a tab stop. ++const ( ++ TabSizeDefault = 4 ++ TabSizeDouble = 8 ++) ++ ++// blockTags is a set of tags that are recognized as HTML block tags. ++// Any of these can be included in markdown text without special escaping. ++var blockTags = map[string]struct{}{ ++ "blockquote": {}, ++ "del": {}, ++ "div": {}, ++ "dl": {}, ++ "fieldset": {}, ++ "form": {}, ++ "h1": {}, ++ "h2": {}, ++ "h3": {}, ++ "h4": {}, ++ "h5": {}, ++ "h6": {}, ++ "iframe": {}, ++ "ins": {}, ++ "math": {}, ++ "noscript": {}, ++ "ol": {}, ++ "pre": {}, ++ "p": {}, ++ "script": {}, ++ "style": {}, ++ "table": {}, ++ "ul": {}, ++ ++ // HTML5 ++ "address": {}, ++ "article": {}, ++ "aside": {}, ++ "canvas": {}, ++ "figcaption": {}, ++ "figure": {}, ++ "footer": {}, ++ "header": {}, ++ "hgroup": {}, ++ "main": {}, ++ "nav": {}, ++ "output": {}, ++ "progress": {}, ++ "section": {}, ++ "video": {}, ++} ++ ++// Renderer is the rendering interface. This is mostly of interest if you are ++// implementing a new rendering format. ++// ++// Only an HTML implementation is provided in this repository, see the README ++// for external implementations. ++type Renderer interface { ++ // RenderNode is the main rendering method. 
It will be called once for ++ // every leaf node and twice for every non-leaf node (first with ++ // entering=true, then with entering=false). The method should write its ++ // rendition of the node to the supplied writer w. ++ RenderNode(w io.Writer, node *Node, entering bool) WalkStatus ++ ++ // RenderHeader is a method that allows the renderer to produce some ++ // content preceding the main body of the output document. The header is ++ // understood in the broad sense here. For example, the default HTML ++ // renderer will write not only the HTML document preamble, but also the ++ // table of contents if it was requested. ++ // ++ // The method will be passed an entire document tree, in case a particular ++ // implementation needs to inspect it to produce output. ++ // ++ // The output should be written to the supplied writer w. If your ++ // implementation has no header to write, supply an empty implementation. ++ RenderHeader(w io.Writer, ast *Node) ++ ++ // RenderFooter is a symmetric counterpart of RenderHeader. ++ RenderFooter(w io.Writer, ast *Node) ++} ++ ++// Callback functions for inline parsing. One such function is defined ++// for each character that triggers a response when parsing inline data. ++type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) ++ ++// Markdown is a type that holds extensions and the runtime state used by ++// Parse, and the renderer. You can not use it directly, construct it with New. ++type Markdown struct { ++ renderer Renderer ++ referenceOverride ReferenceOverrideFunc ++ refs map[string]*reference ++ inlineCallback [256]inlineParser ++ extensions Extensions ++ nesting int ++ maxNesting int ++ insideLink bool ++ ++ // Footnotes need to be ordered as well as available to quickly check for ++ // presence. If a ref is also a footnote, it's stored both in refs and here ++ // in notes. Slice is nil if footnotes not enabled. ++ notes []*reference ++ ++ doc *Node ++ tip *Node // = doc ++ oldTip *Node ++ lastMatchedContainer *Node // = doc ++ allClosed bool ++} ++ ++func (p *Markdown) getRef(refid string) (ref *reference, found bool) { ++ if p.referenceOverride != nil { ++ r, overridden := p.referenceOverride(refid) ++ if overridden { ++ if r == nil { ++ return nil, false ++ } ++ return &reference{ ++ link: []byte(r.Link), ++ title: []byte(r.Title), ++ noteID: 0, ++ hasBlock: false, ++ text: []byte(r.Text)}, true ++ } ++ } ++ // refs are case insensitive ++ ref, found = p.refs[strings.ToLower(refid)] ++ return ref, found ++} ++ ++func (p *Markdown) finalize(block *Node) { ++ above := block.Parent ++ block.open = false ++ p.tip = above ++} ++ ++func (p *Markdown) addChild(node NodeType, offset uint32) *Node { ++ return p.addExistingChild(NewNode(node), offset) ++} ++ ++func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { ++ for !p.tip.canContain(node.Type) { ++ p.finalize(p.tip) ++ } ++ p.tip.AppendChild(node) ++ p.tip = node ++ return node ++} ++ ++func (p *Markdown) closeUnmatchedBlocks() { ++ if !p.allClosed { ++ for p.oldTip != p.lastMatchedContainer { ++ parent := p.oldTip.Parent ++ p.finalize(p.oldTip) ++ p.oldTip = parent ++ } ++ p.allClosed = true ++ } ++} ++ ++// ++// ++// Public interface ++// ++// ++ ++// Reference represents the details of a link. ++// See the documentation in Options for more details on use-case. ++type Reference struct { ++ // Link is usually the URL the reference points to. ++ Link string ++ // Title is the alternate text describing the link in more detail. 
++ Title string ++ // Text is the optional text to override the ref with if the syntax used was ++ // [refid][] ++ Text string ++} ++ ++// ReferenceOverrideFunc is expected to be called with a reference string and ++// return either a valid Reference type that the reference string maps to or ++// nil. If overridden is false, the default reference logic will be executed. ++// See the documentation in Options for more details on use-case. ++type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) ++ ++// New constructs a Markdown processor. You can use the same With* functions as ++// for Run() to customize parser's behavior and the renderer. ++func New(opts ...Option) *Markdown { ++ var p Markdown ++ for _, opt := range opts { ++ opt(&p) ++ } ++ p.refs = make(map[string]*reference) ++ p.maxNesting = 16 ++ p.insideLink = false ++ docNode := NewNode(Document) ++ p.doc = docNode ++ p.tip = docNode ++ p.oldTip = docNode ++ p.lastMatchedContainer = docNode ++ p.allClosed = true ++ // register inline parsers ++ p.inlineCallback[' '] = maybeLineBreak ++ p.inlineCallback['*'] = emphasis ++ p.inlineCallback['_'] = emphasis ++ if p.extensions&Strikethrough != 0 { ++ p.inlineCallback['~'] = emphasis ++ } ++ p.inlineCallback['`'] = codeSpan ++ p.inlineCallback['\n'] = lineBreak ++ p.inlineCallback['['] = link ++ p.inlineCallback['<'] = leftAngle ++ p.inlineCallback['\\'] = escape ++ p.inlineCallback['&'] = entity ++ p.inlineCallback['!'] = maybeImage ++ p.inlineCallback['^'] = maybeInlineFootnote ++ if p.extensions&Autolink != 0 { ++ p.inlineCallback['h'] = maybeAutoLink ++ p.inlineCallback['m'] = maybeAutoLink ++ p.inlineCallback['f'] = maybeAutoLink ++ p.inlineCallback['H'] = maybeAutoLink ++ p.inlineCallback['M'] = maybeAutoLink ++ p.inlineCallback['F'] = maybeAutoLink ++ } ++ if p.extensions&Footnotes != 0 { ++ p.notes = make([]*reference, 0) ++ } ++ return &p ++} ++ ++// Option customizes the Markdown processor's default behavior. ++type Option func(*Markdown) ++ ++// WithRenderer allows you to override the default renderer. ++func WithRenderer(r Renderer) Option { ++ return func(p *Markdown) { ++ p.renderer = r ++ } ++} ++ ++// WithExtensions allows you to pick some of the many extensions provided by ++// Blackfriday. You can bitwise OR them. ++func WithExtensions(e Extensions) Option { ++ return func(p *Markdown) { ++ p.extensions = e ++ } ++} ++ ++// WithNoExtensions turns off all extensions and custom behavior. ++func WithNoExtensions() Option { ++ return func(p *Markdown) { ++ p.extensions = NoExtensions ++ p.renderer = NewHTMLRenderer(HTMLRendererParameters{ ++ Flags: HTMLFlagsNone, ++ }) ++ } ++} ++ ++// WithRefOverride sets an optional function callback that is called every ++// time a reference is resolved. ++// ++// In Markdown, the link reference syntax can be made to resolve a link to ++// a reference instead of an inline URL, in one of the following ways: ++// ++// * [link text][refid] ++// * [refid][] ++// ++// Usually, the refid is defined at the bottom of the Markdown document. If ++// this override function is provided, the refid is passed to the override ++// function first, before consulting the defined refids at the bottom. If ++// the override function indicates an override did not occur, the refids at ++// the bottom will be used to fill in the link details. ++func WithRefOverride(o ReferenceOverrideFunc) Option { ++ return func(p *Markdown) { ++ p.referenceOverride = o ++ } ++} ++ ++// Run is the main entry point to Blackfriday. 
It parses and renders a ++// block of markdown-encoded text. ++// ++// The simplest invocation of Run takes one argument, input: ++// output := Run(input) ++// This will parse the input with CommonExtensions enabled and render it with ++// the default HTMLRenderer (with CommonHTMLFlags). ++// ++// Variadic arguments opts can customize the default behavior. Since Markdown ++// type does not contain exported fields, you can not use it directly. Instead, ++// use the With* functions. For example, this will call the most basic ++// functionality, with no extensions: ++// output := Run(input, WithNoExtensions()) ++// ++// You can use any number of With* arguments, even contradicting ones. They ++// will be applied in order of appearance and the latter will override the ++// former: ++// output := Run(input, WithNoExtensions(), WithExtensions(exts), ++// WithRenderer(yourRenderer)) ++func Run(input []byte, opts ...Option) []byte { ++ r := NewHTMLRenderer(HTMLRendererParameters{ ++ Flags: CommonHTMLFlags, ++ }) ++ optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} ++ optList = append(optList, opts...) ++ parser := New(optList...) ++ ast := parser.Parse(input) ++ var buf bytes.Buffer ++ parser.renderer.RenderHeader(&buf, ast) ++ ast.Walk(func(node *Node, entering bool) WalkStatus { ++ return parser.renderer.RenderNode(&buf, node, entering) ++ }) ++ parser.renderer.RenderFooter(&buf, ast) ++ return buf.Bytes() ++} ++ ++// Parse is an entry point to the parsing part of Blackfriday. It takes an ++// input markdown document and produces a syntax tree for its contents. This ++// tree can then be rendered with a default or custom renderer, or ++// analyzed/transformed by the caller to whatever non-standard needs they have. ++// The return value is the root node of the syntax tree. ++func (p *Markdown) Parse(input []byte) *Node { ++ p.block(input) ++ // Walk the tree and finish up some of unfinished blocks ++ for p.tip != nil { ++ p.finalize(p.tip) ++ } ++ // Walk the tree again and process inline markdown in each block ++ p.doc.Walk(func(node *Node, entering bool) WalkStatus { ++ if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { ++ p.inline(node, node.content) ++ node.content = nil ++ } ++ return GoToNext ++ }) ++ p.parseRefsToAST() ++ return p.doc ++} ++ ++func (p *Markdown) parseRefsToAST() { ++ if p.extensions&Footnotes == 0 || len(p.notes) == 0 { ++ return ++ } ++ p.tip = p.doc ++ block := p.addBlock(List, nil) ++ block.IsFootnotesList = true ++ block.ListFlags = ListTypeOrdered ++ flags := ListItemBeginningOfList ++ // Note: this loop is intentionally explicit, not range-form. This is ++ // because the body of the loop will append nested footnotes to p.notes and ++ // we need to process those late additions. Range form would only walk over ++ // the fixed initial set. 
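++	// (A footnote defined inside another footnote's body is appended to
++	// p.notes while this loop runs and is handled on a later iteration.)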
++ for i := 0; i < len(p.notes); i++ { ++ ref := p.notes[i] ++ p.addExistingChild(ref.footnote, 0) ++ block := ref.footnote ++ block.ListFlags = flags | ListTypeOrdered ++ block.RefLink = ref.link ++ if ref.hasBlock { ++ flags |= ListItemContainsBlock ++ p.block(ref.title) ++ } else { ++ p.inline(block, ref.title) ++ } ++ flags &^= ListItemBeginningOfList | ListItemContainsBlock ++ } ++ above := block.Parent ++ finalizeList(block) ++ p.tip = above ++ block.Walk(func(node *Node, entering bool) WalkStatus { ++ if node.Type == Paragraph || node.Type == Heading { ++ p.inline(node, node.content) ++ node.content = nil ++ } ++ return GoToNext ++ }) ++} ++ ++// ++// Link references ++// ++// This section implements support for references that (usually) appear ++// as footnotes in a document, and can be referenced anywhere in the document. ++// The basic format is: ++// ++// [1]: http://www.google.com/ "Google" ++// [2]: http://www.github.com/ "Github" ++// ++// Anywhere in the document, the reference can be linked by referring to its ++// label, i.e., 1 and 2 in this example, as in: ++// ++// This library is hosted on [Github][2], a git hosting site. ++// ++// Actual footnotes as specified in Pandoc and supported by some other Markdown ++// libraries such as php-markdown are also taken care of. They look like this: ++// ++// This sentence needs a bit of further explanation.[^note] ++// ++// [^note]: This is the explanation. ++// ++// Footnotes should be placed at the end of the document in an ordered list. ++// Finally, there are inline footnotes such as: ++// ++// Inline footnotes^[Also supported.] provide a quick inline explanation, ++// but are rendered at the bottom of the document. ++// ++ ++// reference holds all information necessary for a reference-style links or ++// footnotes. ++// ++// Consider this markdown with reference-style links: ++// ++// [link][ref] ++// ++// [ref]: /url/ "tooltip title" ++// ++// It will be ultimately converted to this HTML: ++// ++//
++//	<p><a href="/url/" title="tooltip title">link</a></p>
    ++// ++// And a reference structure will be populated as follows: ++// ++// p.refs["ref"] = &reference{ ++// link: "/url/", ++// title: "tooltip title", ++// } ++// ++// Alternatively, reference can contain information about a footnote. Consider ++// this markdown: ++// ++// Text needing a footnote.[^a] ++// ++// [^a]: This is the note ++// ++// A reference structure will be populated as follows: ++// ++// p.refs["a"] = &reference{ ++// link: "a", ++// title: "This is the note", ++// noteID: , ++// } ++// ++// TODO: As you can see, it begs for splitting into two dedicated structures ++// for refs and for footnotes. ++type reference struct { ++ link []byte ++ title []byte ++ noteID int // 0 if not a footnote ref ++ hasBlock bool ++ footnote *Node // a link to the Item node within a list of footnotes ++ ++ text []byte // only gets populated by refOverride feature with Reference.Text ++} ++ ++func (r *reference) String() string { ++ return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", ++ r.link, r.title, r.text, r.noteID, r.hasBlock) ++} ++ ++// Check whether or not data starts with a reference link. ++// If so, it is parsed and stored in the list of references ++// (in the render struct). ++// Returns the number of bytes to skip to move past it, ++// or zero if the first line is not a reference. ++func isReference(p *Markdown, data []byte, tabSize int) int { ++ // up to 3 optional leading spaces ++ if len(data) < 4 { ++ return 0 ++ } ++ i := 0 ++ for i < 3 && data[i] == ' ' { ++ i++ ++ } ++ ++ noteID := 0 ++ ++ // id part: anything but a newline between brackets ++ if data[i] != '[' { ++ return 0 ++ } ++ i++ ++ if p.extensions&Footnotes != 0 { ++ if i < len(data) && data[i] == '^' { ++ // we can set it to anything here because the proper noteIds will ++ // be assigned later during the second pass. It just has to be != 0 ++ noteID = 1 ++ i++ ++ } ++ } ++ idOffset := i ++ for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { ++ i++ ++ } ++ if i >= len(data) || data[i] != ']' { ++ return 0 ++ } ++ idEnd := i ++ // footnotes can have empty ID, like this: [^], but a reference can not be ++ // empty like this: []. Break early if it's not a footnote and there's no ID ++ if noteID == 0 && idOffset == idEnd { ++ return 0 ++ } ++ // spacer: colon (space | tab)* newline? 
(space | tab)* ++ i++ ++ if i >= len(data) || data[i] != ':' { ++ return 0 ++ } ++ i++ ++ for i < len(data) && (data[i] == ' ' || data[i] == '\t') { ++ i++ ++ } ++ if i < len(data) && (data[i] == '\n' || data[i] == '\r') { ++ i++ ++ if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { ++ i++ ++ } ++ } ++ for i < len(data) && (data[i] == ' ' || data[i] == '\t') { ++ i++ ++ } ++ if i >= len(data) { ++ return 0 ++ } ++ ++ var ( ++ linkOffset, linkEnd int ++ titleOffset, titleEnd int ++ lineEnd int ++ raw []byte ++ hasBlock bool ++ ) ++ ++ if p.extensions&Footnotes != 0 && noteID != 0 { ++ linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) ++ lineEnd = linkEnd ++ } else { ++ linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) ++ } ++ if lineEnd == 0 { ++ return 0 ++ } ++ ++ // a valid ref has been found ++ ++ ref := &reference{ ++ noteID: noteID, ++ hasBlock: hasBlock, ++ } ++ ++ if noteID > 0 { ++ // reusing the link field for the id since footnotes don't have links ++ ref.link = data[idOffset:idEnd] ++ // if footnote, it's not really a title, it's the contained text ++ ref.title = raw ++ } else { ++ ref.link = data[linkOffset:linkEnd] ++ ref.title = data[titleOffset:titleEnd] ++ } ++ ++ // id matches are case-insensitive ++ id := string(bytes.ToLower(data[idOffset:idEnd])) ++ ++ p.refs[id] = ref ++ ++ return lineEnd ++} ++ ++func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { ++ // link: whitespace-free sequence, optionally between angle brackets ++ if data[i] == '<' { ++ i++ ++ } ++ linkOffset = i ++ for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { ++ i++ ++ } ++ linkEnd = i ++ if data[linkOffset] == '<' && data[linkEnd-1] == '>' { ++ linkOffset++ ++ linkEnd-- ++ } ++ ++ // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) ++ for i < len(data) && (data[i] == ' ' || data[i] == '\t') { ++ i++ ++ } ++ if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { ++ return ++ } ++ ++ // compute end-of-line ++ if i >= len(data) || data[i] == '\r' || data[i] == '\n' { ++ lineEnd = i ++ } ++ if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { ++ lineEnd++ ++ } ++ ++ // optional (space|tab)* spacer after a newline ++ if lineEnd > 0 { ++ i = lineEnd + 1 ++ for i < len(data) && (data[i] == ' ' || data[i] == '\t') { ++ i++ ++ } ++ } ++ ++ // optional title: any non-newline sequence enclosed in '"() alone on its line ++ if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { ++ i++ ++ titleOffset = i ++ ++ // look for EOL ++ for i < len(data) && data[i] != '\n' && data[i] != '\r' { ++ i++ ++ } ++ if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { ++ titleEnd = i + 1 ++ } else { ++ titleEnd = i ++ } ++ ++ // step back ++ i-- ++ for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { ++ i-- ++ } ++ if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { ++ lineEnd = titleEnd ++ titleEnd = i ++ } ++ } ++ ++ return ++} ++ ++// The first bit of this logic is the same as Parser.listItem, but the rest ++// is much simpler. This function simply finds the entire block and shifts it ++// over by one tab if it is indeed a block (just returns the line if it's not). ++// blockEnd is the end of the section in the input buffer, and contents is the ++// extracted text that was shifted over one tab. 
It will need to be rendered at ++// the end of the document. ++func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { ++ if i == 0 || len(data) == 0 { ++ return ++ } ++ ++ // skip leading whitespace on first line ++ for i < len(data) && data[i] == ' ' { ++ i++ ++ } ++ ++ blockStart = i ++ ++ // find the end of the line ++ blockEnd = i ++ for i < len(data) && data[i-1] != '\n' { ++ i++ ++ } ++ ++ // get working buffer ++ var raw bytes.Buffer ++ ++ // put the first line into the working buffer ++ raw.Write(data[blockEnd:i]) ++ blockEnd = i ++ ++ // process the following lines ++ containsBlankLine := false ++ ++gatherLines: ++ for blockEnd < len(data) { ++ i++ ++ ++ // find the end of this line ++ for i < len(data) && data[i-1] != '\n' { ++ i++ ++ } ++ ++ // if it is an empty line, guess that it is part of this item ++ // and move on to the next line ++ if p.isEmpty(data[blockEnd:i]) > 0 { ++ containsBlankLine = true ++ blockEnd = i ++ continue ++ } ++ ++ n := 0 ++ if n = isIndented(data[blockEnd:i], indentSize); n == 0 { ++ // this is the end of the block. ++ // we don't want to include this last line in the index. ++ break gatherLines ++ } ++ ++ // if there were blank lines before this one, insert a new one now ++ if containsBlankLine { ++ raw.WriteByte('\n') ++ containsBlankLine = false ++ } ++ ++ // get rid of that first tab, write to buffer ++ raw.Write(data[blockEnd+n : i]) ++ hasBlock = true ++ ++ blockEnd = i ++ } ++ ++ if data[blockEnd-1] != '\n' { ++ raw.WriteByte('\n') ++ } ++ ++ contents = raw.Bytes() ++ ++ return ++} ++ ++// ++// ++// Miscellaneous helper functions ++// ++// ++ ++// Test if a character is a punctuation symbol. ++// Taken from a private function in regexp in the stdlib. ++func ispunct(c byte) bool { ++ for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { ++ if c == r { ++ return true ++ } ++ } ++ return false ++} ++ ++// Test if a character is a whitespace character. ++func isspace(c byte) bool { ++ return ishorizontalspace(c) || isverticalspace(c) ++} ++ ++// Test if a character is a horizontal whitespace character. ++func ishorizontalspace(c byte) bool { ++ return c == ' ' || c == '\t' ++} ++ ++// Test if a character is a vertical character. ++func isverticalspace(c byte) bool { ++ return c == '\n' || c == '\r' || c == '\f' || c == '\v' ++} ++ ++// Test if a character is letter. ++func isletter(c byte) bool { ++ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ++} ++ ++// Test if a character is a letter or a digit. ++// TODO: check when this is looking for ASCII alnum and when it should use unicode ++func isalnum(c byte) bool { ++ return (c >= '0' && c <= '9') || isletter(c) ++} ++ ++// Replace tab characters with spaces, aligning to the next TAB_SIZE column. 
++// always ends output with a newline ++func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { ++ // first, check for common cases: no tabs, or only tabs at beginning of line ++ i, prefix := 0, 0 ++ slowcase := false ++ for i = 0; i < len(line); i++ { ++ if line[i] == '\t' { ++ if prefix == i { ++ prefix++ ++ } else { ++ slowcase = true ++ break ++ } ++ } ++ } ++ ++ // no need to decode runes if all tabs are at the beginning of the line ++ if !slowcase { ++ for i = 0; i < prefix*tabSize; i++ { ++ out.WriteByte(' ') ++ } ++ out.Write(line[prefix:]) ++ return ++ } ++ ++ // the slow case: we need to count runes to figure out how ++ // many spaces to insert for each tab ++ column := 0 ++ i = 0 ++ for i < len(line) { ++ start := i ++ for i < len(line) && line[i] != '\t' { ++ _, size := utf8.DecodeRune(line[i:]) ++ i += size ++ column++ ++ } ++ ++ if i > start { ++ out.Write(line[start:i]) ++ } ++ ++ if i >= len(line) { ++ break ++ } ++ ++ for { ++ out.WriteByte(' ') ++ column++ ++ if column%tabSize == 0 { ++ break ++ } ++ } ++ ++ i++ ++ } ++} ++ ++// Find if a line counts as indented or not. ++// Returns number of characters the indent is (0 = not indented). ++func isIndented(data []byte, indentSize int) int { ++ if len(data) == 0 { ++ return 0 ++ } ++ if data[0] == '\t' { ++ return 1 ++ } ++ if len(data) < indentSize { ++ return 0 ++ } ++ for i := 0; i < indentSize; i++ { ++ if data[i] != ' ' { ++ return 0 ++ } ++ } ++ return indentSize ++} ++ ++// Create a url-safe slug for fragments ++func slugify(in []byte) []byte { ++ if len(in) == 0 { ++ return in ++ } ++ out := make([]byte, 0, len(in)) ++ sym := false ++ ++ for _, ch := range in { ++ if isalnum(ch) { ++ sym = false ++ out = append(out, ch) ++ } else if sym { ++ continue ++ } else { ++ out = append(out, '-') ++ sym = true ++ } ++ } ++ var a, b int ++ var ch byte ++ for a, ch = range out { ++ if ch != '-' { ++ break ++ } ++ } ++ for b = len(out) - 1; b > 0; b-- { ++ if out[b] != '-' { ++ break ++ } ++ } ++ return out[a : b+1] ++} +diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go +new file mode 100644 +index 000000000000..04e6050ceeae +--- /dev/null ++++ b/vendor/github.com/russross/blackfriday/v2/node.go +@@ -0,0 +1,360 @@ ++package blackfriday ++ ++import ( ++ "bytes" ++ "fmt" ++) ++ ++// NodeType specifies a type of a single node of a syntax tree. Usually one ++// node (and its type) corresponds to a single markdown feature, e.g. emphasis ++// or code block. ++type NodeType int ++ ++// Constants for identifying different types of nodes. See NodeType. 
++const ( ++ Document NodeType = iota ++ BlockQuote ++ List ++ Item ++ Paragraph ++ Heading ++ HorizontalRule ++ Emph ++ Strong ++ Del ++ Link ++ Image ++ Text ++ HTMLBlock ++ CodeBlock ++ Softbreak ++ Hardbreak ++ Code ++ HTMLSpan ++ Table ++ TableCell ++ TableHead ++ TableBody ++ TableRow ++) ++ ++var nodeTypeNames = []string{ ++ Document: "Document", ++ BlockQuote: "BlockQuote", ++ List: "List", ++ Item: "Item", ++ Paragraph: "Paragraph", ++ Heading: "Heading", ++ HorizontalRule: "HorizontalRule", ++ Emph: "Emph", ++ Strong: "Strong", ++ Del: "Del", ++ Link: "Link", ++ Image: "Image", ++ Text: "Text", ++ HTMLBlock: "HTMLBlock", ++ CodeBlock: "CodeBlock", ++ Softbreak: "Softbreak", ++ Hardbreak: "Hardbreak", ++ Code: "Code", ++ HTMLSpan: "HTMLSpan", ++ Table: "Table", ++ TableCell: "TableCell", ++ TableHead: "TableHead", ++ TableBody: "TableBody", ++ TableRow: "TableRow", ++} ++ ++func (t NodeType) String() string { ++ return nodeTypeNames[t] ++} ++ ++// ListData contains fields relevant to a List and Item node type. ++type ListData struct { ++ ListFlags ListType ++ Tight bool // Skip
<p>
    s around list item data if true ++ BulletChar byte // '*', '+' or '-' in bullet lists ++ Delimiter byte // '.' or ')' after the number in ordered lists ++ RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering ++ IsFootnotesList bool // This is a list of footnotes ++} ++ ++// LinkData contains fields relevant to a Link node type. ++type LinkData struct { ++ Destination []byte // Destination is what goes into a href ++ Title []byte // Title is the tooltip thing that goes in a title attribute ++ NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote ++ Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. ++} ++ ++// CodeBlockData contains fields relevant to a CodeBlock node type. ++type CodeBlockData struct { ++ IsFenced bool // Specifies whether it's a fenced code block or an indented one ++ Info []byte // This holds the info string ++ FenceChar byte ++ FenceLength int ++ FenceOffset int ++} ++ ++// TableCellData contains fields relevant to a TableCell node type. ++type TableCellData struct { ++ IsHeader bool // This tells if it's under the header row ++ Align CellAlignFlags // This holds the value for align attribute ++} ++ ++// HeadingData contains fields relevant to a Heading node type. ++type HeadingData struct { ++ Level int // This holds the heading level number ++ HeadingID string // This might hold heading ID, if present ++ IsTitleblock bool // Specifies whether it's a title block ++} ++ ++// Node is a single element in the abstract syntax tree of the parsed document. ++// It holds connections to the structurally neighboring nodes and, for certain ++// types of nodes, additional information that might be needed when rendering. ++type Node struct { ++ Type NodeType // Determines the type of the node ++ Parent *Node // Points to the parent ++ FirstChild *Node // Points to the first child, if any ++ LastChild *Node // Points to the last child, if any ++ Prev *Node // Previous sibling; nil if it's the first child ++ Next *Node // Next sibling; nil if it's the last child ++ ++ Literal []byte // Text contents of the leaf nodes ++ ++ HeadingData // Populated if Type is Heading ++ ListData // Populated if Type is List ++ CodeBlockData // Populated if Type is CodeBlock ++ LinkData // Populated if Type is Link ++ TableCellData // Populated if Type is TableCell ++ ++ content []byte // Markdown content of the block nodes ++ open bool // Specifies an open block node that has not been finished to process yet ++} ++ ++// NewNode allocates a node of a specified type. ++func NewNode(typ NodeType) *Node { ++ return &Node{ ++ Type: typ, ++ open: true, ++ } ++} ++ ++func (n *Node) String() string { ++ ellipsis := "" ++ snippet := n.Literal ++ if len(snippet) > 16 { ++ snippet = snippet[:16] ++ ellipsis = "..." ++ } ++ return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) ++} ++ ++// Unlink removes node 'n' from the tree. ++// It panics if the node is nil. ++func (n *Node) Unlink() { ++ if n.Prev != nil { ++ n.Prev.Next = n.Next ++ } else if n.Parent != nil { ++ n.Parent.FirstChild = n.Next ++ } ++ if n.Next != nil { ++ n.Next.Prev = n.Prev ++ } else if n.Parent != nil { ++ n.Parent.LastChild = n.Prev ++ } ++ n.Parent = nil ++ n.Next = nil ++ n.Prev = nil ++} ++ ++// AppendChild adds a node 'child' as a child of 'n'. ++// It panics if either node is nil. 
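Before the implementation, a short usage sketch of the tree-mutation API may help; it assumes the canonical import path of this vendored copy and otherwise uses only exported names defined in this file:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	doc := blackfriday.NewNode(blackfriday.Document)
	para := blackfriday.NewNode(blackfriday.Paragraph)
	text := blackfriday.NewNode(blackfriday.Text)
	text.Literal = []byte("hello")

	doc.AppendChild(para)  // para is now doc's only child
	para.AppendChild(text) // AppendChild unlinks first, so re-parenting is safe

	fmt.Println(doc.FirstChild)  // Paragraph: ''
	fmt.Println(para.FirstChild) // Text: 'hello'

	text.Unlink()                // detach: parent and sibling links are reset
	fmt.Println(para.FirstChild) // <nil>
}
```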
++func (n *Node) AppendChild(child *Node) { ++ child.Unlink() ++ child.Parent = n ++ if n.LastChild != nil { ++ n.LastChild.Next = child ++ child.Prev = n.LastChild ++ n.LastChild = child ++ } else { ++ n.FirstChild = child ++ n.LastChild = child ++ } ++} ++ ++// InsertBefore inserts 'sibling' immediately before 'n'. ++// It panics if either node is nil. ++func (n *Node) InsertBefore(sibling *Node) { ++ sibling.Unlink() ++ sibling.Prev = n.Prev ++ if sibling.Prev != nil { ++ sibling.Prev.Next = sibling ++ } ++ sibling.Next = n ++ n.Prev = sibling ++ sibling.Parent = n.Parent ++ if sibling.Prev == nil { ++ sibling.Parent.FirstChild = sibling ++ } ++} ++ ++// IsContainer returns true if 'n' can contain children. ++func (n *Node) IsContainer() bool { ++ switch n.Type { ++ case Document: ++ fallthrough ++ case BlockQuote: ++ fallthrough ++ case List: ++ fallthrough ++ case Item: ++ fallthrough ++ case Paragraph: ++ fallthrough ++ case Heading: ++ fallthrough ++ case Emph: ++ fallthrough ++ case Strong: ++ fallthrough ++ case Del: ++ fallthrough ++ case Link: ++ fallthrough ++ case Image: ++ fallthrough ++ case Table: ++ fallthrough ++ case TableHead: ++ fallthrough ++ case TableBody: ++ fallthrough ++ case TableRow: ++ fallthrough ++ case TableCell: ++ return true ++ default: ++ return false ++ } ++} ++ ++// IsLeaf returns true if 'n' is a leaf node. ++func (n *Node) IsLeaf() bool { ++ return !n.IsContainer() ++} ++ ++func (n *Node) canContain(t NodeType) bool { ++ if n.Type == List { ++ return t == Item ++ } ++ if n.Type == Document || n.Type == BlockQuote || n.Type == Item { ++ return t != Item ++ } ++ if n.Type == Table { ++ return t == TableHead || t == TableBody ++ } ++ if n.Type == TableHead || n.Type == TableBody { ++ return t == TableRow ++ } ++ if n.Type == TableRow { ++ return t == TableCell ++ } ++ return false ++} ++ ++// WalkStatus allows NodeVisitor to have some control over the tree traversal. ++// It is returned from NodeVisitor and different values allow Node.Walk to ++// decide which node to go to next. ++type WalkStatus int ++ ++const ( ++ // GoToNext is the default traversal of every node. ++ GoToNext WalkStatus = iota ++ // SkipChildren tells walker to skip all children of current node. ++ SkipChildren ++ // Terminate tells walker to terminate the traversal. ++ Terminate ++) ++ ++// NodeVisitor is a callback to be called when traversing the syntax tree. ++// Called twice for every node: once with entering=true when the branch is ++// first visited, then with entering=false after all the children are done. ++type NodeVisitor func(node *Node, entering bool) WalkStatus ++ ++// Walk is a convenience method that instantiates a walker and starts a ++// traversal of subtree rooted at n. 
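The traversal contract is easiest to see in use. A hedged sketch, assuming the same import path as above and the `CommonExtensions` preset mentioned in the `Run` documentation:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	ast := md.Parse([]byte("# Title\n\nSome *emphasized* text.\n"))

	ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering {
			fmt.Println(node.Type) // visited before the node's children
		}
		if node.Type == blackfriday.Heading {
			return blackfriday.SkipChildren // prune the heading's subtree
		}
		return blackfriday.GoToNext
	})
}
```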
++func (n *Node) Walk(visitor NodeVisitor) { ++ w := newNodeWalker(n) ++ for w.current != nil { ++ status := visitor(w.current, w.entering) ++ switch status { ++ case GoToNext: ++ w.next() ++ case SkipChildren: ++ w.entering = false ++ w.next() ++ case Terminate: ++ return ++ } ++ } ++} ++ ++type nodeWalker struct { ++ current *Node ++ root *Node ++ entering bool ++} ++ ++func newNodeWalker(root *Node) *nodeWalker { ++ return &nodeWalker{ ++ current: root, ++ root: root, ++ entering: true, ++ } ++} ++ ++func (nw *nodeWalker) next() { ++ if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root { ++ nw.current = nil ++ return ++ } ++ if nw.entering && nw.current.IsContainer() { ++ if nw.current.FirstChild != nil { ++ nw.current = nw.current.FirstChild ++ nw.entering = true ++ } else { ++ nw.entering = false ++ } ++ } else if nw.current.Next == nil { ++ nw.current = nw.current.Parent ++ nw.entering = false ++ } else { ++ nw.current = nw.current.Next ++ nw.entering = true ++ } ++} ++ ++func dump(ast *Node) { ++ fmt.Println(dumpString(ast)) ++} ++ ++func dumpR(ast *Node, depth int) string { ++ if ast == nil { ++ return "" ++ } ++ indent := bytes.Repeat([]byte("\t"), depth) ++ content := ast.Literal ++ if content == nil { ++ content = ast.content ++ } ++ result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) ++ for n := ast.FirstChild; n != nil; n = n.Next { ++ result += dumpR(n, depth+1) ++ } ++ return result ++} ++ ++func dumpString(ast *Node) string { ++ return dumpR(ast, 0) ++} +diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go +new file mode 100644 +index 000000000000..3a220e94247d +--- /dev/null ++++ b/vendor/github.com/russross/blackfriday/v2/smartypants.go +@@ -0,0 +1,457 @@ ++// ++// Blackfriday Markdown Processor ++// Available at http://github.com/russross/blackfriday ++// ++// Copyright © 2011 Russ Ross . ++// Distributed under the Simplified BSD License. ++// See README.md for details. ++// ++ ++// ++// ++// SmartyPants rendering ++// ++// ++ ++package blackfriday ++ ++import ( ++ "bytes" ++ "io" ++) ++ ++// SPRenderer is a struct containing state of a Smartypants renderer. ++type SPRenderer struct { ++ inSingleQuote bool ++ inDoubleQuote bool ++ callbacks [256]smartCallback ++} ++ ++func wordBoundary(c byte) bool { ++ return c == 0 || isspace(c) || ispunct(c) ++} ++ ++func tolower(c byte) byte { ++ if c >= 'A' && c <= 'Z' { ++ return c - 'A' + 'a' ++ } ++ return c ++} ++ ++func isdigit(c byte) bool { ++ return c >= '0' && c <= '9' ++} ++ ++func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { ++ // edge of the buffer is likely to be a tag that we don't get to see, ++ // so we treat it like text sometimes ++ ++ // enumerate all sixteen possibilities for (previousChar, nextChar) ++ // each can be one of {0, space, punct, other} ++ switch { ++ case previousChar == 0 && nextChar == 0: ++ // context is not any help here, so toggle ++ *isOpen = !*isOpen ++ case isspace(previousChar) && nextChar == 0: ++ // [ "] might be [ "foo...] ++ *isOpen = true ++ case ispunct(previousChar) && nextChar == 0: ++ // [!"] hmm... could be [Run!"] or [("...] 
++		*isOpen = false
++	case /* isnormal(previousChar) && */ nextChar == 0:
++		// [a"] is probably a close
++		*isOpen = false
++	case previousChar == 0 && isspace(nextChar):
++		// [" ] might be [...foo" ]
++		*isOpen = false
++	case isspace(previousChar) && isspace(nextChar):
++		// [ " ] context is not any help here, so toggle
++		*isOpen = !*isOpen
++	case ispunct(previousChar) && isspace(nextChar):
++		// [!" ] is probably a close
++		*isOpen = false
++	case /* isnormal(previousChar) && */ isspace(nextChar):
++		// [a" ] this is one of the easy cases
++		*isOpen = false
++	case previousChar == 0 && ispunct(nextChar):
++		// ["!] hmm... could be ["$1.95] or ["!...]
++		*isOpen = false
++	case isspace(previousChar) && ispunct(nextChar):
++		// [ "!] looks more like [ "$1.95]
++		*isOpen = true
++	case ispunct(previousChar) && ispunct(nextChar):
++		// [!"!] context is not any help here, so toggle
++		*isOpen = !*isOpen
++	case /* isnormal(previousChar) && */ ispunct(nextChar):
++		// [a"!] is probably a close
++		*isOpen = false
++	case previousChar == 0 /* && isnormal(nextChar) */ :
++		// ["a] is probably an open
++		*isOpen = true
++	case isspace(previousChar) /* && isnormal(nextChar) */ :
++		// [ "a] this is one of the easy cases
++		*isOpen = true
++	case ispunct(previousChar) /* && isnormal(nextChar) */ :
++		// [!"a] is probably an open
++		*isOpen = true
++	default:
++		// [a'b] maybe a contraction?
++		*isOpen = false
++	}
++
++	// Note that with the limited lookahead, this non-breaking
++	// space will also be appended to single double quotes.
++	if addNBSP && !*isOpen {
++		out.WriteString("&nbsp;")
++	}
++
++	out.WriteByte('&')
++	if *isOpen {
++		out.WriteByte('l')
++	} else {
++		out.WriteByte('r')
++	}
++	out.WriteByte(quote)
++	out.WriteString("quo;")
++
++	if addNBSP && *isOpen {
++		out.WriteString("&nbsp;")
++	}
++
++	return true
++}
++
++func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
++	if len(text) >= 2 {
++		t1 := tolower(text[1])
++
++		if t1 == '\'' {
++			nextChar := byte(0)
++			if len(text) >= 3 {
++				nextChar = text[2]
++			}
++			if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
++				return 1
++			}
++		}
++
++		if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
++			out.WriteString("&rsquo;")
++			return 0
++		}
++
++		if len(text) >= 3 {
++			t2 := tolower(text[2])
++
++			if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
++				(len(text) < 4 || wordBoundary(text[3])) {
++				out.WriteString("&rsquo;")
++				return 0
++			}
++		}
++	}
++
++	nextChar := byte(0)
++	if len(text) > 1 {
++		nextChar = text[1]
++	}
++	if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) {
++		return 0
++	}
++
++	out.WriteByte(text[0])
++	return 0
++}
++
++func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int {
++	if len(text) >= 3 {
++		t1 := tolower(text[1])
++		t2 := tolower(text[2])
++
++		if t1 == 'c' && t2 == ')' {
++			out.WriteString("&copy;")
++			return 2
++		}
++
++		if t1 == 'r' && t2 == ')' {
++			out.WriteString("&reg;")
++			return 2
++		}
++
++		if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
++			out.WriteString("&trade;")
++			return 3
++		}
++	}
++
++	out.WriteByte(text[0])
++	return 0
++}
++
++func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int {
++	if len(text) >= 2 {
++		if text[1] == '-' {
++			out.WriteString("&mdash;")
++			return 1
++		}
++
++		if wordBoundary(previousChar) && wordBoundary(text[1]) {
++			out.WriteString("&ndash;")
++			return 0
++		}
++	}
++
++	out.WriteByte(text[0])
++	return 0
++}
++
++func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int {
++	if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
++		out.WriteString("&mdash;")
++		return 2
++	}
++	if len(text) >= 2 && text[1] == '-' {
++		out.WriteString("&ndash;")
++		return 1
++	}
++
++	out.WriteByte(text[0])
++	return 0
++}
++
++func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int {
++	if bytes.HasPrefix(text, []byte("&quot;")) {
++		nextChar := byte(0)
++		if len(text) >= 7 {
++			nextChar = text[6]
++		}
++		if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) {
++			return 5
++		}
++	}
++
++	if bytes.HasPrefix(text, []byte("&#34;")) {
++		return 4
++	}
++
++	out.WriteByte('&')
++	return 0
++}
++
++func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int {
++	var quote byte = 'd'
++	if angledQuotes {
++		quote = 'a'
++	}
++
++	return func(out *bytes.Buffer, previousChar byte, text []byte) int {
++		return r.smartAmpVariant(out, previousChar, text, quote, addNBSP)
++	}
++}
++
++func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int {
++	if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
++		out.WriteString("&hellip;")
++		return 2
++	}
++
++	if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
++		out.WriteString("&hellip;")
++		return 4
++	}
++
++	out.WriteByte(text[0])
++	return 0
++}
++
++func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int {
++	if len(text) >= 2 && text[1] == '`' {
++		nextChar := byte(0)
++		if len(text) >= 3 {
++			nextChar = text[2]
++		}
++		if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
++			return 1
++		}
++	}
++
++	out.WriteByte(text[0])
++	return 0
++}
++
++func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int {
++	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
++		// is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
++		// note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
++		// and avoid changing dates like 1/23/2005 into fractions.
++		numEnd := 0
++		for len(text) > numEnd && isdigit(text[numEnd]) {
++			numEnd++
++		}
++		if numEnd == 0 {
++			out.WriteByte(text[0])
++			return 0
++		}
++		denStart := numEnd + 1
++		if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
++			denStart = numEnd + 3
++		} else if len(text) < numEnd+2 || text[numEnd] != '/' {
++			out.WriteByte(text[0])
++			return 0
++		}
++		denEnd := denStart
++		for len(text) > denEnd && isdigit(text[denEnd]) {
++			denEnd++
++		}
++		if denEnd == denStart {
++			out.WriteByte(text[0])
++			return 0
++		}
++		if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
++			out.WriteString("<sup>")
++			out.Write(text[:numEnd])
++			out.WriteString("</sup>&frasl;<sub>")
++			out.Write(text[denStart:denEnd])
++			out.WriteString("</sub>")
++			return denEnd - 1
++		}
++	}
++
++	out.WriteByte(text[0])
++	return 0
++}
++
++func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int {
++	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
++		if text[0] == '1' && text[1] == '/' && text[2] == '2' {
++			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
++				out.WriteString("&frac12;")
++				return 2
++			}
++		}
++
++		if text[0] == '1' && text[1] == '/' && text[2] == '4' {
++			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
++				out.WriteString("&frac14;")
++				return 2
++			}
++		}
++
++		if text[0] == '3' && text[1] == '/' && text[2] == '4' {
++			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
++				out.WriteString("&frac34;")
++				return 2
++			}
++		}
++	}
++
++	out.WriteByte(text[0])
++	return 0
++}
++
++func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int {
++	nextChar := byte(0)
++	if len(text) > 1 {
++		nextChar = text[1]
++	}
++	if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) {
++		out.WriteString("&quot;")
++	}
++
++	return 0
++}
++
++func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
++	return r.smartDoubleQuoteVariant(out, previousChar, text, 'd')
++}
++
++func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
++	return r.smartDoubleQuoteVariant(out, previousChar, text, 'a')
++}
++
++func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int {
++	i := 0
++
++	for i < len(text) && text[i] != '>' {
++		i++
++	}
++
++	out.Write(text[:i+1])
++	return i
++}
++
++type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int
++
++// NewSmartypantsRenderer constructs a Smartypants renderer object.
++func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer { ++ var ( ++ r SPRenderer ++ ++ smartAmpAngled = r.smartAmp(true, false) ++ smartAmpAngledNBSP = r.smartAmp(true, true) ++ smartAmpRegular = r.smartAmp(false, false) ++ smartAmpRegularNBSP = r.smartAmp(false, true) ++ ++ addNBSP = flags&SmartypantsQuotesNBSP != 0 ++ ) ++ ++ if flags&SmartypantsAngledQuotes == 0 { ++ r.callbacks['"'] = r.smartDoubleQuote ++ if !addNBSP { ++ r.callbacks['&'] = smartAmpRegular ++ } else { ++ r.callbacks['&'] = smartAmpRegularNBSP ++ } ++ } else { ++ r.callbacks['"'] = r.smartAngledDoubleQuote ++ if !addNBSP { ++ r.callbacks['&'] = smartAmpAngled ++ } else { ++ r.callbacks['&'] = smartAmpAngledNBSP ++ } ++ } ++ r.callbacks['\''] = r.smartSingleQuote ++ r.callbacks['('] = r.smartParens ++ if flags&SmartypantsDashes != 0 { ++ if flags&SmartypantsLatexDashes == 0 { ++ r.callbacks['-'] = r.smartDash ++ } else { ++ r.callbacks['-'] = r.smartDashLatex ++ } ++ } ++ r.callbacks['.'] = r.smartPeriod ++ if flags&SmartypantsFractions == 0 { ++ r.callbacks['1'] = r.smartNumber ++ r.callbacks['3'] = r.smartNumber ++ } else { ++ for ch := '1'; ch <= '9'; ch++ { ++ r.callbacks[ch] = r.smartNumberGeneric ++ } ++ } ++ r.callbacks['<'] = r.smartLeftAngle ++ r.callbacks['`'] = r.smartBacktick ++ return &r ++} ++ ++// Process is the entry point of the Smartypants renderer. ++func (r *SPRenderer) Process(w io.Writer, text []byte) { ++ mark := 0 ++ for i := 0; i < len(text); i++ { ++ if action := r.callbacks[text[i]]; action != nil { ++ if i > mark { ++ w.Write(text[mark:i]) ++ } ++ previousChar := byte(0) ++ if i > 0 { ++ previousChar = text[i-1] ++ } ++ var tmp bytes.Buffer ++ i += action(&tmp, previousChar, text[i:]) ++ w.Write(tmp.Bytes()) ++ mark = i + 1 ++ } ++ } ++ if mark < len(text) { ++ w.Write(text[mark:]) ++ } ++} +diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go +new file mode 100644 +index 000000000000..b8c15ce88543 +--- /dev/null ++++ b/vendor/github.com/spf13/cobra/doc/man_docs.go +@@ -0,0 +1,246 @@ ++// Copyright 2013-2023 The Cobra Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package doc ++ ++import ( ++ "bytes" ++ "fmt" ++ "io" ++ "os" ++ "path/filepath" ++ "sort" ++ "strconv" ++ "strings" ++ "time" ++ ++ "github.com/cpuguy83/go-md2man/v2/md2man" ++ "github.com/spf13/cobra" ++ "github.com/spf13/pflag" ++) ++ ++// GenManTree will generate a man page for this command and all descendants ++// in the directory given. The header may be nil. This function may not work ++// correctly if your command names have `-` in them. If you have `cmd` with two ++// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third` ++// it is undefined which help output will be in the file `cmd-sub-third.1`. 
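With the callback table and `Process` complete, a small driver shows the SmartyPants renderer in isolation; the flag names are the ones wired up in the constructor above, while the sample input is invented:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	sp := blackfriday.NewSmartypantsRenderer(
		blackfriday.SmartypantsDashes | blackfriday.SmartypantsFractions)

	var out bytes.Buffer
	sp.Process(&out, []byte(`"half" is 1/2 -- more or less...`))
	// Expected entity output, per the callbacks above:
	// &ldquo;half&rdquo; is <sup>1</sup>&frasl;<sub>2</sub> &mdash; more or less&hellip;
	fmt.Println(out.String())
}
```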
++func GenManTree(cmd *cobra.Command, header *GenManHeader, dir string) error { ++ return GenManTreeFromOpts(cmd, GenManTreeOptions{ ++ Header: header, ++ Path: dir, ++ CommandSeparator: "-", ++ }) ++} ++ ++// GenManTreeFromOpts generates a man page for the command and all descendants. ++// The pages are written to the opts.Path directory. ++func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error { ++ header := opts.Header ++ if header == nil { ++ header = &GenManHeader{} ++ } ++ for _, c := range cmd.Commands() { ++ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { ++ continue ++ } ++ if err := GenManTreeFromOpts(c, opts); err != nil { ++ return err ++ } ++ } ++ section := "1" ++ if header.Section != "" { ++ section = header.Section ++ } ++ ++ separator := "_" ++ if opts.CommandSeparator != "" { ++ separator = opts.CommandSeparator ++ } ++ basename := strings.ReplaceAll(cmd.CommandPath(), " ", separator) ++ filename := filepath.Join(opts.Path, basename+"."+section) ++ f, err := os.Create(filename) ++ if err != nil { ++ return err ++ } ++ defer f.Close() ++ ++ headerCopy := *header ++ return GenMan(cmd, &headerCopy, f) ++} ++ ++// GenManTreeOptions is the options for generating the man pages. ++// Used only in GenManTreeFromOpts. ++type GenManTreeOptions struct { ++ Header *GenManHeader ++ Path string ++ CommandSeparator string ++} ++ ++// GenManHeader is a lot like the .TH header at the start of man pages. These ++// include the title, section, date, source, and manual. We will use the ++// current time if Date is unset and will use "Auto generated by spf13/cobra" ++// if the Source is unset. ++type GenManHeader struct { ++ Title string ++ Section string ++ Date *time.Time ++ date string ++ Source string ++ Manual string ++} ++ ++// GenMan will generate a man page for the given command and write it to ++// w. The header argument may be nil, however obviously w may not. 
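A minimal end-to-end sketch of the man-page generators above (the `app`/`serve` commands and the ./man output directory are hypothetical):

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	root := &cobra.Command{Use: "app", Short: "An example application"}
	root.AddCommand(&cobra.Command{Use: "serve", Short: "Start the server",
		Run: func(*cobra.Command, []string) {}})

	// Writes app.1 and app-serve.1 into ./man; the directory must exist.
	header := &doc.GenManHeader{Title: "APP", Section: "1"}
	if err := doc.GenManTree(root, header, "./man"); err != nil {
		log.Fatal(err)
	}
}
```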
++func GenMan(cmd *cobra.Command, header *GenManHeader, w io.Writer) error { ++ if header == nil { ++ header = &GenManHeader{} ++ } ++ if err := fillHeader(header, cmd.CommandPath(), cmd.DisableAutoGenTag); err != nil { ++ return err ++ } ++ ++ b := genMan(cmd, header) ++ _, err := w.Write(md2man.Render(b)) ++ return err ++} ++ ++func fillHeader(header *GenManHeader, name string, disableAutoGen bool) error { ++ if header.Title == "" { ++ header.Title = strings.ToUpper(strings.ReplaceAll(name, " ", "\\-")) ++ } ++ if header.Section == "" { ++ header.Section = "1" ++ } ++ if header.Date == nil { ++ now := time.Now() ++ if epoch := os.Getenv("SOURCE_DATE_EPOCH"); epoch != "" { ++ unixEpoch, err := strconv.ParseInt(epoch, 10, 64) ++ if err != nil { ++ return fmt.Errorf("invalid SOURCE_DATE_EPOCH: %v", err) ++ } ++ now = time.Unix(unixEpoch, 0) ++ } ++ header.Date = &now ++ } ++ header.date = (*header.Date).Format("Jan 2006") ++ if header.Source == "" && !disableAutoGen { ++ header.Source = "Auto generated by spf13/cobra" ++ } ++ return nil ++} ++ ++func manPreamble(buf io.StringWriter, header *GenManHeader, cmd *cobra.Command, dashedName string) { ++ description := cmd.Long ++ if len(description) == 0 { ++ description = cmd.Short ++ } ++ ++ cobra.WriteStringAndCheck(buf, fmt.Sprintf(`%% "%s" "%s" "%s" "%s" "%s" ++# NAME ++`, header.Title, header.Section, header.date, header.Source, header.Manual)) ++ cobra.WriteStringAndCheck(buf, fmt.Sprintf("%s \\- %s\n\n", dashedName, cmd.Short)) ++ cobra.WriteStringAndCheck(buf, "# SYNOPSIS\n") ++ cobra.WriteStringAndCheck(buf, fmt.Sprintf("**%s**\n\n", cmd.UseLine())) ++ cobra.WriteStringAndCheck(buf, "# DESCRIPTION\n") ++ cobra.WriteStringAndCheck(buf, description+"\n\n") ++} ++ ++func manPrintFlags(buf io.StringWriter, flags *pflag.FlagSet) { ++ flags.VisitAll(func(flag *pflag.Flag) { ++ if len(flag.Deprecated) > 0 || flag.Hidden { ++ return ++ } ++ format := "" ++ if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 { ++ format = fmt.Sprintf("**-%s**, **--%s**", flag.Shorthand, flag.Name) ++ } else { ++ format = fmt.Sprintf("**--%s**", flag.Name) ++ } ++ if len(flag.NoOptDefVal) > 0 { ++ format += "[" ++ } ++ if flag.Value.Type() == "string" { ++ // put quotes on the value ++ format += "=%q" ++ } else { ++ format += "=%s" ++ } ++ if len(flag.NoOptDefVal) > 0 { ++ format += "]" ++ } ++ format += "\n\t%s\n\n" ++ cobra.WriteStringAndCheck(buf, fmt.Sprintf(format, flag.DefValue, flag.Usage)) ++ }) ++} ++ ++func manPrintOptions(buf io.StringWriter, command *cobra.Command) { ++ flags := command.NonInheritedFlags() ++ if flags.HasAvailableFlags() { ++ cobra.WriteStringAndCheck(buf, "# OPTIONS\n") ++ manPrintFlags(buf, flags) ++ cobra.WriteStringAndCheck(buf, "\n") ++ } ++ flags = command.InheritedFlags() ++ if flags.HasAvailableFlags() { ++ cobra.WriteStringAndCheck(buf, "# OPTIONS INHERITED FROM PARENT COMMANDS\n") ++ manPrintFlags(buf, flags) ++ cobra.WriteStringAndCheck(buf, "\n") ++ } ++} ++ ++func genMan(cmd *cobra.Command, header *GenManHeader) []byte { ++ cmd.InitDefaultHelpCmd() ++ cmd.InitDefaultHelpFlag() ++ ++ // something like `rootcmd-subcmd1-subcmd2` ++ dashCommandName := strings.ReplaceAll(cmd.CommandPath(), " ", "-") ++ ++ buf := new(bytes.Buffer) ++ ++ manPreamble(buf, header, cmd, dashCommandName) ++ manPrintOptions(buf, cmd) ++ if len(cmd.Example) > 0 { ++ buf.WriteString("# EXAMPLE\n") ++ buf.WriteString(fmt.Sprintf("```\n%s\n```\n", cmd.Example)) ++ } ++ if hasSeeAlso(cmd) { ++ buf.WriteString("# SEE ALSO\n") ++ seealsos := 
make([]string, 0) ++ if cmd.HasParent() { ++ parentPath := cmd.Parent().CommandPath() ++ dashParentPath := strings.ReplaceAll(parentPath, " ", "-") ++ seealso := fmt.Sprintf("**%s(%s)**", dashParentPath, header.Section) ++ seealsos = append(seealsos, seealso) ++ cmd.VisitParents(func(c *cobra.Command) { ++ if c.DisableAutoGenTag { ++ cmd.DisableAutoGenTag = c.DisableAutoGenTag ++ } ++ }) ++ } ++ children := cmd.Commands() ++ sort.Sort(byName(children)) ++ for _, c := range children { ++ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { ++ continue ++ } ++ seealso := fmt.Sprintf("**%s-%s(%s)**", dashCommandName, c.Name(), header.Section) ++ seealsos = append(seealsos, seealso) ++ } ++ buf.WriteString(strings.Join(seealsos, ", ") + "\n") ++ } ++ if !cmd.DisableAutoGenTag { ++ buf.WriteString(fmt.Sprintf("# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006"))) ++ } ++ return buf.Bytes() ++} +diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go +new file mode 100644 +index 000000000000..f98fe2a3b8f3 +--- /dev/null ++++ b/vendor/github.com/spf13/cobra/doc/md_docs.go +@@ -0,0 +1,158 @@ ++// Copyright 2013-2023 The Cobra Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package doc ++ ++import ( ++ "bytes" ++ "fmt" ++ "io" ++ "os" ++ "path/filepath" ++ "sort" ++ "strings" ++ "time" ++ ++ "github.com/spf13/cobra" ++) ++ ++const markdownExtension = ".md" ++ ++func printOptions(buf *bytes.Buffer, cmd *cobra.Command, name string) error { ++ flags := cmd.NonInheritedFlags() ++ flags.SetOutput(buf) ++ if flags.HasAvailableFlags() { ++ buf.WriteString("### Options\n\n```\n") ++ flags.PrintDefaults() ++ buf.WriteString("```\n\n") ++ } ++ ++ parentFlags := cmd.InheritedFlags() ++ parentFlags.SetOutput(buf) ++ if parentFlags.HasAvailableFlags() { ++ buf.WriteString("### Options inherited from parent commands\n\n```\n") ++ parentFlags.PrintDefaults() ++ buf.WriteString("```\n\n") ++ } ++ return nil ++} ++ ++// GenMarkdown creates markdown output. ++func GenMarkdown(cmd *cobra.Command, w io.Writer) error { ++ return GenMarkdownCustom(cmd, w, func(s string) string { return s }) ++} ++ ++// GenMarkdownCustom creates custom markdown output. 
++func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error { ++ cmd.InitDefaultHelpCmd() ++ cmd.InitDefaultHelpFlag() ++ ++ buf := new(bytes.Buffer) ++ name := cmd.CommandPath() ++ ++ buf.WriteString("## " + name + "\n\n") ++ buf.WriteString(cmd.Short + "\n\n") ++ if len(cmd.Long) > 0 { ++ buf.WriteString("### Synopsis\n\n") ++ buf.WriteString(cmd.Long + "\n\n") ++ } ++ ++ if cmd.Runnable() { ++ buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.UseLine())) ++ } ++ ++ if len(cmd.Example) > 0 { ++ buf.WriteString("### Examples\n\n") ++ buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example)) ++ } ++ ++ if err := printOptions(buf, cmd, name); err != nil { ++ return err ++ } ++ if hasSeeAlso(cmd) { ++ buf.WriteString("### SEE ALSO\n\n") ++ if cmd.HasParent() { ++ parent := cmd.Parent() ++ pname := parent.CommandPath() ++ link := pname + markdownExtension ++ link = strings.ReplaceAll(link, " ", "_") ++ buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short)) ++ cmd.VisitParents(func(c *cobra.Command) { ++ if c.DisableAutoGenTag { ++ cmd.DisableAutoGenTag = c.DisableAutoGenTag ++ } ++ }) ++ } ++ ++ children := cmd.Commands() ++ sort.Sort(byName(children)) ++ ++ for _, child := range children { ++ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { ++ continue ++ } ++ cname := name + " " + child.Name() ++ link := cname + markdownExtension ++ link = strings.ReplaceAll(link, " ", "_") ++ buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short)) ++ } ++ buf.WriteString("\n") ++ } ++ if !cmd.DisableAutoGenTag { ++ buf.WriteString("###### Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "\n") ++ } ++ _, err := buf.WriteTo(w) ++ return err ++} ++ ++// GenMarkdownTree will generate a markdown page for this command and all ++// descendants in the directory given. The header may be nil. ++// This function may not work correctly if your command names have `-` in them. ++// If you have `cmd` with two subcmds, `sub` and `sub-third`, ++// and `sub` has a subcommand called `third`, it is undefined which ++// help output will be in the file `cmd-sub-third.1`. ++func GenMarkdownTree(cmd *cobra.Command, dir string) error { ++ identity := func(s string) string { return s } ++ emptyStr := func(s string) string { return "" } ++ return GenMarkdownTreeCustom(cmd, dir, emptyStr, identity) ++} ++ ++// GenMarkdownTreeCustom is the the same as GenMarkdownTree, but ++// with custom filePrepender and linkHandler. 
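As a sketch of the two customization hooks just described (a front-matter prepender and a link rewriter, both hypothetical):

```go
package main

import (
	"fmt"
	"log"
	"path/filepath"
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	root := &cobra.Command{Use: "app", Short: "An example application"}
	root.AddCommand(&cobra.Command{Use: "serve", Short: "Start the server",
		Run: func(*cobra.Command, []string) {}})

	// Prepend front matter to every page; rewrite cross-links under /docs/.
	filePrepender := func(filename string) string {
		name := strings.TrimSuffix(filepath.Base(filename), ".md")
		return fmt.Sprintf("---\ntitle: %s\n---\n\n", name)
	}
	linkHandler := func(link string) string { return "/docs/" + link }

	if err := doc.GenMarkdownTreeCustom(root, "./docs", filePrepender, linkHandler); err != nil {
		log.Fatal(err)
	}
}
```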
++func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error { ++ for _, c := range cmd.Commands() { ++ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { ++ continue ++ } ++ if err := GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler); err != nil { ++ return err ++ } ++ } ++ ++ basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + markdownExtension ++ filename := filepath.Join(dir, basename) ++ f, err := os.Create(filename) ++ if err != nil { ++ return err ++ } ++ defer f.Close() ++ ++ if _, err := io.WriteString(f, filePrepender(filename)); err != nil { ++ return err ++ } ++ if err := GenMarkdownCustom(cmd, f, linkHandler); err != nil { ++ return err ++ } ++ return nil ++} +diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.go b/vendor/github.com/spf13/cobra/doc/rest_docs.go +new file mode 100644 +index 000000000000..2cca6fd778de +--- /dev/null ++++ b/vendor/github.com/spf13/cobra/doc/rest_docs.go +@@ -0,0 +1,186 @@ ++// Copyright 2013-2023 The Cobra Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package doc ++ ++import ( ++ "bytes" ++ "fmt" ++ "io" ++ "os" ++ "path/filepath" ++ "sort" ++ "strings" ++ "time" ++ ++ "github.com/spf13/cobra" ++) ++ ++func printOptionsReST(buf *bytes.Buffer, cmd *cobra.Command, name string) error { ++ flags := cmd.NonInheritedFlags() ++ flags.SetOutput(buf) ++ if flags.HasAvailableFlags() { ++ buf.WriteString("Options\n") ++ buf.WriteString("~~~~~~~\n\n::\n\n") ++ flags.PrintDefaults() ++ buf.WriteString("\n") ++ } ++ ++ parentFlags := cmd.InheritedFlags() ++ parentFlags.SetOutput(buf) ++ if parentFlags.HasAvailableFlags() { ++ buf.WriteString("Options inherited from parent commands\n") ++ buf.WriteString("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n") ++ parentFlags.PrintDefaults() ++ buf.WriteString("\n") ++ } ++ return nil ++} ++ ++// defaultLinkHandler for default ReST hyperlink markup ++func defaultLinkHandler(name, ref string) string { ++ return fmt.Sprintf("`%s <%s.rst>`_", name, ref) ++} ++ ++// GenReST creates reStructured Text output. ++func GenReST(cmd *cobra.Command, w io.Writer) error { ++ return GenReSTCustom(cmd, w, defaultLinkHandler) ++} ++ ++// GenReSTCustom creates custom reStructured Text output. ++func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, string) string) error { ++ cmd.InitDefaultHelpCmd() ++ cmd.InitDefaultHelpFlag() ++ ++ buf := new(bytes.Buffer) ++ name := cmd.CommandPath() ++ ++ short := cmd.Short ++ long := cmd.Long ++ if len(long) == 0 { ++ long = short ++ } ++ ref := strings.ReplaceAll(name, " ", "_") ++ ++ buf.WriteString(".. 
_" + ref + ":\n\n") ++ buf.WriteString(name + "\n") ++ buf.WriteString(strings.Repeat("-", len(name)) + "\n\n") ++ buf.WriteString(short + "\n\n") ++ buf.WriteString("Synopsis\n") ++ buf.WriteString("~~~~~~~~\n\n") ++ buf.WriteString("\n" + long + "\n\n") ++ ++ if cmd.Runnable() { ++ buf.WriteString(fmt.Sprintf("::\n\n %s\n\n", cmd.UseLine())) ++ } ++ ++ if len(cmd.Example) > 0 { ++ buf.WriteString("Examples\n") ++ buf.WriteString("~~~~~~~~\n\n") ++ buf.WriteString(fmt.Sprintf("::\n\n%s\n\n", indentString(cmd.Example, " "))) ++ } ++ ++ if err := printOptionsReST(buf, cmd, name); err != nil { ++ return err ++ } ++ if hasSeeAlso(cmd) { ++ buf.WriteString("SEE ALSO\n") ++ buf.WriteString("~~~~~~~~\n\n") ++ if cmd.HasParent() { ++ parent := cmd.Parent() ++ pname := parent.CommandPath() ++ ref = strings.ReplaceAll(pname, " ", "_") ++ buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(pname, ref), parent.Short)) ++ cmd.VisitParents(func(c *cobra.Command) { ++ if c.DisableAutoGenTag { ++ cmd.DisableAutoGenTag = c.DisableAutoGenTag ++ } ++ }) ++ } ++ ++ children := cmd.Commands() ++ sort.Sort(byName(children)) ++ ++ for _, child := range children { ++ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { ++ continue ++ } ++ cname := name + " " + child.Name() ++ ref = strings.ReplaceAll(cname, " ", "_") ++ buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(cname, ref), child.Short)) ++ } ++ buf.WriteString("\n") ++ } ++ if !cmd.DisableAutoGenTag { ++ buf.WriteString("*Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "*\n") ++ } ++ _, err := buf.WriteTo(w) ++ return err ++} ++ ++// GenReSTTree will generate a ReST page for this command and all ++// descendants in the directory given. ++// This function may not work correctly if your command names have `-` in them. ++// If you have `cmd` with two subcmds, `sub` and `sub-third`, ++// and `sub` has a subcommand called `third`, it is undefined which ++// help output will be in the file `cmd-sub-third.1`. ++func GenReSTTree(cmd *cobra.Command, dir string) error { ++ emptyStr := func(s string) string { return "" } ++ return GenReSTTreeCustom(cmd, dir, emptyStr, defaultLinkHandler) ++} ++ ++// GenReSTTreeCustom is the the same as GenReSTTree, but ++// with custom filePrepender and linkHandler. ++func GenReSTTreeCustom(cmd *cobra.Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error { ++ for _, c := range cmd.Commands() { ++ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { ++ continue ++ } ++ if err := GenReSTTreeCustom(c, dir, filePrepender, linkHandler); err != nil { ++ return err ++ } ++ } ++ ++ basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".rst" ++ filename := filepath.Join(dir, basename) ++ f, err := os.Create(filename) ++ if err != nil { ++ return err ++ } ++ defer f.Close() ++ ++ if _, err := io.WriteString(f, filePrepender(filename)); err != nil { ++ return err ++ } ++ if err := GenReSTCustom(cmd, f, linkHandler); err != nil { ++ return err ++ } ++ return nil ++} ++ ++// indentString adapted from: https://github.com/kr/text/blob/main/indent.go ++func indentString(s, p string) string { ++ var res []byte ++ b := []byte(s) ++ prefix := []byte(p) ++ bol := true ++ for _, c := range b { ++ if bol && c != '\n' { ++ res = append(res, prefix...) 
++ } ++ res = append(res, c) ++ bol = c == '\n' ++ } ++ return string(res) ++} +diff --git a/vendor/github.com/spf13/cobra/doc/util.go b/vendor/github.com/spf13/cobra/doc/util.go +new file mode 100644 +index 000000000000..0aaa07a166d8 +--- /dev/null ++++ b/vendor/github.com/spf13/cobra/doc/util.go +@@ -0,0 +1,52 @@ ++// Copyright 2013-2023 The Cobra Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package doc ++ ++import ( ++ "strings" ++ ++ "github.com/spf13/cobra" ++) ++ ++// Test to see if we have a reason to print See Also information in docs ++// Basically this is a test for a parent command or a subcommand which is ++// both not deprecated and not the autogenerated help command. ++func hasSeeAlso(cmd *cobra.Command) bool { ++ if cmd.HasParent() { ++ return true ++ } ++ for _, c := range cmd.Commands() { ++ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { ++ continue ++ } ++ return true ++ } ++ return false ++} ++ ++// Temporary workaround for yaml lib generating incorrect yaml with long strings ++// that do not contain \n. ++func forceMultiLine(s string) string { ++ if len(s) > 60 && !strings.Contains(s, "\n") { ++ s = s + "\n" ++ } ++ return s ++} ++ ++type byName []*cobra.Command ++ ++func (s byName) Len() int { return len(s) } ++func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } ++func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } +diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go +new file mode 100644 +index 000000000000..2b26d6ec0f3e +--- /dev/null ++++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.go +@@ -0,0 +1,175 @@ ++// Copyright 2013-2023 The Cobra Authors ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. 
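The ReST generator defined a little earlier rounds out the same family; a parallel sketch, again with hypothetical commands and output directory:

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	root := &cobra.Command{Use: "app", Short: "An example application"}
	root.AddCommand(&cobra.Command{Use: "serve", Short: "Start the server",
		Run: func(*cobra.Command, []string) {}})

	// Emits app.rst and app_serve.rst, cross-linked as `name <ref.rst>`_.
	if err := doc.GenReSTTree(root, "./rst"); err != nil {
		log.Fatal(err)
	}
}
```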
++ ++package doc ++ ++import ( ++ "fmt" ++ "io" ++ "os" ++ "path/filepath" ++ "sort" ++ "strings" ++ ++ "github.com/spf13/cobra" ++ "github.com/spf13/pflag" ++ "gopkg.in/yaml.v3" ++) ++ ++type cmdOption struct { ++ Name string ++ Shorthand string `yaml:",omitempty"` ++ DefaultValue string `yaml:"default_value,omitempty"` ++ Usage string `yaml:",omitempty"` ++} ++ ++type cmdDoc struct { ++ Name string ++ Synopsis string `yaml:",omitempty"` ++ Description string `yaml:",omitempty"` ++ Usage string `yaml:",omitempty"` ++ Options []cmdOption `yaml:",omitempty"` ++ InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"` ++ Example string `yaml:",omitempty"` ++ SeeAlso []string `yaml:"see_also,omitempty"` ++} ++ ++// GenYamlTree creates yaml structured ref files for this command and all descendants ++// in the directory given. This function may not work ++// correctly if your command names have `-` in them. If you have `cmd` with two ++// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third` ++// it is undefined which help output will be in the file `cmd-sub-third.1`. ++func GenYamlTree(cmd *cobra.Command, dir string) error { ++ identity := func(s string) string { return s } ++ emptyStr := func(s string) string { return "" } ++ return GenYamlTreeCustom(cmd, dir, emptyStr, identity) ++} ++ ++// GenYamlTreeCustom creates yaml structured ref files. ++func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error { ++ for _, c := range cmd.Commands() { ++ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() { ++ continue ++ } ++ if err := GenYamlTreeCustom(c, dir, filePrepender, linkHandler); err != nil { ++ return err ++ } ++ } ++ ++ basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".yaml" ++ filename := filepath.Join(dir, basename) ++ f, err := os.Create(filename) ++ if err != nil { ++ return err ++ } ++ defer f.Close() ++ ++ if _, err := io.WriteString(f, filePrepender(filename)); err != nil { ++ return err ++ } ++ if err := GenYamlCustom(cmd, f, linkHandler); err != nil { ++ return err ++ } ++ return nil ++} ++ ++// GenYaml creates yaml output. ++func GenYaml(cmd *cobra.Command, w io.Writer) error { ++ return GenYamlCustom(cmd, w, func(s string) string { return s }) ++} ++ ++// GenYamlCustom creates custom yaml output. 
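Before the implementation, a sketch that drives the YAML generator straight into a buffer (hypothetical commands again):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	root := &cobra.Command{Use: "app", Short: "An example application"}
	root.AddCommand(&cobra.Command{Use: "serve", Short: "Start the server",
		Run: func(*cobra.Command, []string) {}})

	// GenYaml marshals one cmdDoc: name, synopsis, usage, see_also, ...
	var buf bytes.Buffer
	if err := doc.GenYaml(root, &buf); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
}
```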
++func GenYamlCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error { ++ cmd.InitDefaultHelpCmd() ++ cmd.InitDefaultHelpFlag() ++ ++ yamlDoc := cmdDoc{} ++ yamlDoc.Name = cmd.CommandPath() ++ ++ yamlDoc.Synopsis = forceMultiLine(cmd.Short) ++ yamlDoc.Description = forceMultiLine(cmd.Long) ++ ++ if cmd.Runnable() { ++ yamlDoc.Usage = cmd.UseLine() ++ } ++ ++ if len(cmd.Example) > 0 { ++ yamlDoc.Example = cmd.Example ++ } ++ ++ flags := cmd.NonInheritedFlags() ++ if flags.HasFlags() { ++ yamlDoc.Options = genFlagResult(flags) ++ } ++ flags = cmd.InheritedFlags() ++ if flags.HasFlags() { ++ yamlDoc.InheritedOptions = genFlagResult(flags) ++ } ++ ++ if hasSeeAlso(cmd) { ++ result := []string{} ++ if cmd.HasParent() { ++ parent := cmd.Parent() ++ result = append(result, parent.CommandPath()+" - "+parent.Short) ++ } ++ children := cmd.Commands() ++ sort.Sort(byName(children)) ++ for _, child := range children { ++ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() { ++ continue ++ } ++ result = append(result, child.CommandPath()+" - "+child.Short) ++ } ++ yamlDoc.SeeAlso = result ++ } ++ ++ final, err := yaml.Marshal(&yamlDoc) ++ if err != nil { ++ fmt.Println(err) ++ os.Exit(1) ++ } ++ ++ if _, err := w.Write(final); err != nil { ++ return err ++ } ++ return nil ++} ++ ++func genFlagResult(flags *pflag.FlagSet) []cmdOption { ++ var result []cmdOption ++ ++ flags.VisitAll(func(flag *pflag.Flag) { ++ // Todo, when we mark a shorthand is deprecated, but specify an empty message. ++ // The flag.ShorthandDeprecated is empty as the shorthand is deprecated. ++ // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok. ++ if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 { ++ opt := cmdOption{ ++ flag.Name, ++ flag.Shorthand, ++ flag.DefValue, ++ forceMultiLine(flag.Usage), ++ } ++ result = append(result, opt) ++ } else { ++ opt := cmdOption{ ++ Name: flag.Name, ++ DefaultValue: forceMultiLine(flag.DefValue), ++ Usage: forceMultiLine(flag.Usage), ++ } ++ result = append(result, opt) ++ } ++ }) ++ ++ return result ++} +diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE +new file mode 100644 +index 000000000000..2683e4bb1f24 +--- /dev/null ++++ b/vendor/gopkg.in/yaml.v3/LICENSE +@@ -0,0 +1,50 @@ ++ ++This project is covered by two different licenses: MIT and Apache. ++ ++#### MIT License #### ++ ++The following files were ported to Go from C files of libyaml, and thus ++are still covered by their original MIT license, with the additional ++copyright staring in 2011 when the project was ported over: ++ ++ apic.go emitterc.go parserc.go readerc.go scannerc.go ++ writerc.go yamlh.go yamlprivateh.go ++ ++Copyright (c) 2006-2010 Kirill Simonov ++Copyright (c) 2006-2011 Kirill Simonov ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy of ++this software and associated documentation files (the "Software"), to deal in ++the Software without restriction, including without limitation the rights to ++use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies ++of the Software, and to permit persons to whom the Software is furnished to do ++so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in all ++copies or substantial portions of the Software. 
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++SOFTWARE.
++
++### Apache License ###
++
++All the remaining project files are covered by the Apache license:
++
++Copyright (c) 2011-2019 Canonical Ltd
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++    http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
+diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE
+new file mode 100644
+index 000000000000..866d74a7ad79
+--- /dev/null
++++ b/vendor/gopkg.in/yaml.v3/NOTICE
+@@ -0,0 +1,13 @@
++Copyright 2011-2016 Canonical Ltd.
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++    http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
+diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md
+new file mode 100644
+index 000000000000..08eb1babddfa
+--- /dev/null
++++ b/vendor/gopkg.in/yaml.v3/README.md
+@@ -0,0 +1,150 @@
++# YAML support for the Go language
++
++Introduction
++------------
++
++The yaml package enables Go programs to comfortably encode and decode YAML
++values. It was developed within [Canonical](https://www.canonical.com) as
++part of the [juju](https://juju.ubuntu.com) project, and is based on a
++pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
++C library to parse and generate YAML data quickly and reliably.
++
++Compatibility
++-------------
++
++The yaml package supports most of YAML 1.2, but preserves some behavior
++from 1.1 for backwards compatibility.
++
++Specifically, as of v3 of the yaml package:
++
++ - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
++   decoded into a typed bool value. Otherwise they behave as a string. Booleans
++   in YAML 1.2 are _true/false_ only.
++ - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
++   as specified in YAML 1.2, because most parsers still use the old format.
++   Octals in the _0o777_ format are supported though, so new files work.
++ - Does not support base-60 floats. These are gone from YAML 1.2, and were
++   actually never supported by this package as it's clearly a poor choice.
++
++It also offers backwards
++compatibility with YAML 1.1 in some cases, and supports most of YAML
++1.2, including support for
++anchors, tags, map merging, etc.
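++
++For example, the YAML 1.1 booleans described above decode differently
++depending on the target type (illustrative snippet, editorial addition;
++error handling elided):
++
++```Go
++var b bool
++_ = yaml.Unmarshal([]byte("yes"), &b) // b == true: 1.1 bool into a typed bool
++
++var v interface{}
++_ = yaml.Unmarshal([]byte("yes"), &v) // v == "yes": otherwise treated as a string
++```
++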
Multi-document unmarshalling is not yet ++implemented, and base-60 floats from YAML 1.1 are purposefully not ++supported since they're a poor design and are gone in YAML 1.2. ++ ++Installation and usage ++---------------------- ++ ++The import path for the package is *gopkg.in/yaml.v3*. ++ ++To install it, run: ++ ++ go get gopkg.in/yaml.v3 ++ ++API documentation ++----------------- ++ ++If opened in a browser, the import path itself leads to the API documentation: ++ ++ - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3) ++ ++API stability ++------------- ++ ++The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in). ++ ++ ++License ++------- ++ ++The yaml package is licensed under the MIT and Apache License 2.0 licenses. ++Please see the LICENSE file for details. ++ ++ ++Example ++------- ++ ++```Go ++package main ++ ++import ( ++ "fmt" ++ "log" ++ ++ "gopkg.in/yaml.v3" ++) ++ ++var data = ` ++a: Easy! ++b: ++ c: 2 ++ d: [3, 4] ++` ++ ++// Note: struct fields must be public in order for unmarshal to ++// correctly populate the data. ++type T struct { ++ A string ++ B struct { ++ RenamedC int `yaml:"c"` ++ D []int `yaml:",flow"` ++ } ++} ++ ++func main() { ++ t := T{} ++ ++ err := yaml.Unmarshal([]byte(data), &t) ++ if err != nil { ++ log.Fatalf("error: %v", err) ++ } ++ fmt.Printf("--- t:\n%v\n\n", t) ++ ++ d, err := yaml.Marshal(&t) ++ if err != nil { ++ log.Fatalf("error: %v", err) ++ } ++ fmt.Printf("--- t dump:\n%s\n\n", string(d)) ++ ++ m := make(map[interface{}]interface{}) ++ ++ err = yaml.Unmarshal([]byte(data), &m) ++ if err != nil { ++ log.Fatalf("error: %v", err) ++ } ++ fmt.Printf("--- m:\n%v\n\n", m) ++ ++ d, err = yaml.Marshal(&m) ++ if err != nil { ++ log.Fatalf("error: %v", err) ++ } ++ fmt.Printf("--- m dump:\n%s\n\n", string(d)) ++} ++``` ++ ++This example will generate the following output: ++ ++``` ++--- t: ++{Easy! {2 [3 4]}} ++ ++--- t dump: ++a: Easy! ++b: ++ c: 2 ++ d: [3, 4] ++ ++ ++--- m: ++map[a:Easy! b:map[c:2 d:[3 4]]] ++ ++--- m dump: ++a: Easy! ++b: ++ c: 2 ++ d: ++ - 3 ++ - 4 ++``` ++ +diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go +new file mode 100644 +index 000000000000..ae7d049f182a +--- /dev/null ++++ b/vendor/gopkg.in/yaml.v3/apic.go +@@ -0,0 +1,747 @@ ++// ++// Copyright (c) 2011-2019 Canonical Ltd ++// Copyright (c) 2006-2010 Kirill Simonov ++// ++// Permission is hereby granted, free of charge, to any person obtaining a copy of ++// this software and associated documentation files (the "Software"), to deal in ++// the Software without restriction, including without limitation the rights to ++// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies ++// of the Software, and to permit persons to whom the Software is furnished to do ++// so, subject to the following conditions: ++// ++// The above copyright notice and this permission notice shall be included in all ++// copies or substantial portions of the Software. ++// ++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++// SOFTWARE. 
++ ++package yaml ++ ++import ( ++ "io" ++) ++ ++func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { ++ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) ++ ++ // Check if we can move the queue at the beginning of the buffer. ++ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { ++ if parser.tokens_head != len(parser.tokens) { ++ copy(parser.tokens, parser.tokens[parser.tokens_head:]) ++ } ++ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] ++ parser.tokens_head = 0 ++ } ++ parser.tokens = append(parser.tokens, *token) ++ if pos < 0 { ++ return ++ } ++ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) ++ parser.tokens[parser.tokens_head+pos] = *token ++} ++ ++// Create a new parser object. ++func yaml_parser_initialize(parser *yaml_parser_t) bool { ++ *parser = yaml_parser_t{ ++ raw_buffer: make([]byte, 0, input_raw_buffer_size), ++ buffer: make([]byte, 0, input_buffer_size), ++ } ++ return true ++} ++ ++// Destroy a parser object. ++func yaml_parser_delete(parser *yaml_parser_t) { ++ *parser = yaml_parser_t{} ++} ++ ++// String read handler. ++func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { ++ if parser.input_pos == len(parser.input) { ++ return 0, io.EOF ++ } ++ n = copy(buffer, parser.input[parser.input_pos:]) ++ parser.input_pos += n ++ return n, nil ++} ++ ++// Reader read handler. ++func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { ++ return parser.input_reader.Read(buffer) ++} ++ ++// Set a string input. ++func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { ++ if parser.read_handler != nil { ++ panic("must set the input source only once") ++ } ++ parser.read_handler = yaml_string_read_handler ++ parser.input = input ++ parser.input_pos = 0 ++} ++ ++// Set a file input. ++func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { ++ if parser.read_handler != nil { ++ panic("must set the input source only once") ++ } ++ parser.read_handler = yaml_reader_read_handler ++ parser.input_reader = r ++} ++ ++// Set the source encoding. ++func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { ++ if parser.encoding != yaml_ANY_ENCODING { ++ panic("must set the encoding only once") ++ } ++ parser.encoding = encoding ++} ++ ++// Create a new emitter object. ++func yaml_emitter_initialize(emitter *yaml_emitter_t) { ++ *emitter = yaml_emitter_t{ ++ buffer: make([]byte, output_buffer_size), ++ raw_buffer: make([]byte, 0, output_raw_buffer_size), ++ states: make([]yaml_emitter_state_t, 0, initial_stack_size), ++ events: make([]yaml_event_t, 0, initial_queue_size), ++ best_width: -1, ++ } ++} ++ ++// Destroy an emitter object. ++func yaml_emitter_delete(emitter *yaml_emitter_t) { ++ *emitter = yaml_emitter_t{} ++} ++ ++// String write handler. ++func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { ++ *emitter.output_buffer = append(*emitter.output_buffer, buffer...) ++ return nil ++} ++ ++// yaml_writer_write_handler uses emitter.output_writer to write the ++// emitted text. ++func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { ++ _, err := emitter.output_writer.Write(buffer) ++ return err ++} ++ ++// Set a string output. 
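++// Like the parser's input source above, the emitter's output target may be
++// set only once; a second call panics. Illustrative internal use (editorial
++// sketch):
++//
++//	var out []byte
++//	var emitter yaml_emitter_t
++//	yaml_emitter_initialize(&emitter)
++//	yaml_emitter_set_output_string(&emitter, &out)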
++func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { ++ if emitter.write_handler != nil { ++ panic("must set the output target only once") ++ } ++ emitter.write_handler = yaml_string_write_handler ++ emitter.output_buffer = output_buffer ++} ++ ++// Set a file output. ++func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { ++ if emitter.write_handler != nil { ++ panic("must set the output target only once") ++ } ++ emitter.write_handler = yaml_writer_write_handler ++ emitter.output_writer = w ++} ++ ++// Set the output encoding. ++func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { ++ if emitter.encoding != yaml_ANY_ENCODING { ++ panic("must set the output encoding only once") ++ } ++ emitter.encoding = encoding ++} ++ ++// Set the canonical output style. ++func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { ++ emitter.canonical = canonical ++} ++ ++// Set the indentation increment. ++func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { ++ if indent < 2 || indent > 9 { ++ indent = 2 ++ } ++ emitter.best_indent = indent ++} ++ ++// Set the preferred line width. ++func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { ++ if width < 0 { ++ width = -1 ++ } ++ emitter.best_width = width ++} ++ ++// Set if unescaped non-ASCII characters are allowed. ++func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { ++ emitter.unicode = unicode ++} ++ ++// Set the preferred line break character. ++func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { ++ emitter.line_break = line_break ++} ++ ++///* ++// * Destroy a token object. ++// */ ++// ++//YAML_DECLARE(void) ++//yaml_token_delete(yaml_token_t *token) ++//{ ++// assert(token); // Non-NULL token object expected. ++// ++// switch (token.type) ++// { ++// case YAML_TAG_DIRECTIVE_TOKEN: ++// yaml_free(token.data.tag_directive.handle); ++// yaml_free(token.data.tag_directive.prefix); ++// break; ++// ++// case YAML_ALIAS_TOKEN: ++// yaml_free(token.data.alias.value); ++// break; ++// ++// case YAML_ANCHOR_TOKEN: ++// yaml_free(token.data.anchor.value); ++// break; ++// ++// case YAML_TAG_TOKEN: ++// yaml_free(token.data.tag.handle); ++// yaml_free(token.data.tag.suffix); ++// break; ++// ++// case YAML_SCALAR_TOKEN: ++// yaml_free(token.data.scalar.value); ++// break; ++// ++// default: ++// break; ++// } ++// ++// memset(token, 0, sizeof(yaml_token_t)); ++//} ++// ++///* ++// * Check if a string is a valid UTF-8 sequence. ++// * ++// * Check 'reader.c' for more details on UTF-8 encoding. ++// */ ++// ++//static int ++//yaml_check_utf8(yaml_char_t *start, size_t length) ++//{ ++// yaml_char_t *end = start+length; ++// yaml_char_t *pointer = start; ++// ++// while (pointer < end) { ++// unsigned char octet; ++// unsigned int width; ++// unsigned int value; ++// size_t k; ++// ++// octet = pointer[0]; ++// width = (octet & 0x80) == 0x00 ? 1 : ++// (octet & 0xE0) == 0xC0 ? 2 : ++// (octet & 0xF0) == 0xE0 ? 3 : ++// (octet & 0xF8) == 0xF0 ? 4 : 0; ++// value = (octet & 0x80) == 0x00 ? octet & 0x7F : ++// (octet & 0xE0) == 0xC0 ? octet & 0x1F : ++// (octet & 0xF0) == 0xE0 ? octet & 0x0F : ++// (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; ++// if (!width) return 0; ++// if (pointer+width > end) return 0; ++// for (k = 1; k < width; k ++) { ++// octet = pointer[k]; ++// if ((octet & 0xC0) != 0x80) return 0; ++// value = (value << 6) + (octet & 0x3F); ++// } ++// if (!((width == 1) || ++// (width == 2 && value >= 0x80) || ++// (width == 3 && value >= 0x800) || ++// (width == 4 && value >= 0x10000))) return 0; ++// ++// pointer += width; ++// } ++// ++// return 1; ++//} ++// ++ ++// Create STREAM-START. ++func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { ++ *event = yaml_event_t{ ++ typ: yaml_STREAM_START_EVENT, ++ encoding: encoding, ++ } ++} ++ ++// Create STREAM-END. ++func yaml_stream_end_event_initialize(event *yaml_event_t) { ++ *event = yaml_event_t{ ++ typ: yaml_STREAM_END_EVENT, ++ } ++} ++ ++// Create DOCUMENT-START. ++func yaml_document_start_event_initialize( ++ event *yaml_event_t, ++ version_directive *yaml_version_directive_t, ++ tag_directives []yaml_tag_directive_t, ++ implicit bool, ++) { ++ *event = yaml_event_t{ ++ typ: yaml_DOCUMENT_START_EVENT, ++ version_directive: version_directive, ++ tag_directives: tag_directives, ++ implicit: implicit, ++ } ++} ++ ++// Create DOCUMENT-END. ++func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { ++ *event = yaml_event_t{ ++ typ: yaml_DOCUMENT_END_EVENT, ++ implicit: implicit, ++ } ++} ++ ++// Create ALIAS. ++func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { ++ *event = yaml_event_t{ ++ typ: yaml_ALIAS_EVENT, ++ anchor: anchor, ++ } ++ return true ++} ++ ++// Create SCALAR. ++func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { ++ *event = yaml_event_t{ ++ typ: yaml_SCALAR_EVENT, ++ anchor: anchor, ++ tag: tag, ++ value: value, ++ implicit: plain_implicit, ++ quoted_implicit: quoted_implicit, ++ style: yaml_style_t(style), ++ } ++ return true ++} ++ ++// Create SEQUENCE-START. ++func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { ++ *event = yaml_event_t{ ++ typ: yaml_SEQUENCE_START_EVENT, ++ anchor: anchor, ++ tag: tag, ++ implicit: implicit, ++ style: yaml_style_t(style), ++ } ++ return true ++} ++ ++// Create SEQUENCE-END. ++func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { ++ *event = yaml_event_t{ ++ typ: yaml_SEQUENCE_END_EVENT, ++ } ++ return true ++} ++ ++// Create MAPPING-START. ++func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { ++ *event = yaml_event_t{ ++ typ: yaml_MAPPING_START_EVENT, ++ anchor: anchor, ++ tag: tag, ++ implicit: implicit, ++ style: yaml_style_t(style), ++ } ++} ++ ++// Create MAPPING-END. ++func yaml_mapping_end_event_initialize(event *yaml_event_t) { ++ *event = yaml_event_t{ ++ typ: yaml_MAPPING_END_EVENT, ++ } ++} ++ ++// Destroy an event object. ++func yaml_event_delete(event *yaml_event_t) { ++ *event = yaml_event_t{} ++} ++ ++///* ++// * Create a document object. 
++// */ ++// ++//YAML_DECLARE(int) ++//yaml_document_initialize(document *yaml_document_t, ++// version_directive *yaml_version_directive_t, ++// tag_directives_start *yaml_tag_directive_t, ++// tag_directives_end *yaml_tag_directive_t, ++// start_implicit int, end_implicit int) ++//{ ++// struct { ++// error yaml_error_type_t ++// } context ++// struct { ++// start *yaml_node_t ++// end *yaml_node_t ++// top *yaml_node_t ++// } nodes = { NULL, NULL, NULL } ++// version_directive_copy *yaml_version_directive_t = NULL ++// struct { ++// start *yaml_tag_directive_t ++// end *yaml_tag_directive_t ++// top *yaml_tag_directive_t ++// } tag_directives_copy = { NULL, NULL, NULL } ++// value yaml_tag_directive_t = { NULL, NULL } ++// mark yaml_mark_t = { 0, 0, 0 } ++// ++// assert(document) // Non-NULL document object is expected. ++// assert((tag_directives_start && tag_directives_end) || ++// (tag_directives_start == tag_directives_end)) ++// // Valid tag directives are expected. ++// ++// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error ++// ++// if (version_directive) { ++// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) ++// if (!version_directive_copy) goto error ++// version_directive_copy.major = version_directive.major ++// version_directive_copy.minor = version_directive.minor ++// } ++// ++// if (tag_directives_start != tag_directives_end) { ++// tag_directive *yaml_tag_directive_t ++// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) ++// goto error ++// for (tag_directive = tag_directives_start ++// tag_directive != tag_directives_end; tag_directive ++) { ++// assert(tag_directive.handle) ++// assert(tag_directive.prefix) ++// if (!yaml_check_utf8(tag_directive.handle, ++// strlen((char *)tag_directive.handle))) ++// goto error ++// if (!yaml_check_utf8(tag_directive.prefix, ++// strlen((char *)tag_directive.prefix))) ++// goto error ++// value.handle = yaml_strdup(tag_directive.handle) ++// value.prefix = yaml_strdup(tag_directive.prefix) ++// if (!value.handle || !value.prefix) goto error ++// if (!PUSH(&context, tag_directives_copy, value)) ++// goto error ++// value.handle = NULL ++// value.prefix = NULL ++// } ++// } ++// ++// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, ++// tag_directives_copy.start, tag_directives_copy.top, ++// start_implicit, end_implicit, mark, mark) ++// ++// return 1 ++// ++//error: ++// STACK_DEL(&context, nodes) ++// yaml_free(version_directive_copy) ++// while (!STACK_EMPTY(&context, tag_directives_copy)) { ++// value yaml_tag_directive_t = POP(&context, tag_directives_copy) ++// yaml_free(value.handle) ++// yaml_free(value.prefix) ++// } ++// STACK_DEL(&context, tag_directives_copy) ++// yaml_free(value.handle) ++// yaml_free(value.prefix) ++// ++// return 0 ++//} ++// ++///* ++// * Destroy a document object. ++// */ ++// ++//YAML_DECLARE(void) ++//yaml_document_delete(document *yaml_document_t) ++//{ ++// struct { ++// error yaml_error_type_t ++// } context ++// tag_directive *yaml_tag_directive_t ++// ++// context.error = YAML_NO_ERROR // Eliminate a compiler warning. ++// ++// assert(document) // Non-NULL document object is expected. 
++// ++// while (!STACK_EMPTY(&context, document.nodes)) { ++// node yaml_node_t = POP(&context, document.nodes) ++// yaml_free(node.tag) ++// switch (node.type) { ++// case YAML_SCALAR_NODE: ++// yaml_free(node.data.scalar.value) ++// break ++// case YAML_SEQUENCE_NODE: ++// STACK_DEL(&context, node.data.sequence.items) ++// break ++// case YAML_MAPPING_NODE: ++// STACK_DEL(&context, node.data.mapping.pairs) ++// break ++// default: ++// assert(0) // Should not happen. ++// } ++// } ++// STACK_DEL(&context, document.nodes) ++// ++// yaml_free(document.version_directive) ++// for (tag_directive = document.tag_directives.start ++// tag_directive != document.tag_directives.end ++// tag_directive++) { ++// yaml_free(tag_directive.handle) ++// yaml_free(tag_directive.prefix) ++// } ++// yaml_free(document.tag_directives.start) ++// ++// memset(document, 0, sizeof(yaml_document_t)) ++//} ++// ++///** ++// * Get a document node. ++// */ ++// ++//YAML_DECLARE(yaml_node_t *) ++//yaml_document_get_node(document *yaml_document_t, index int) ++//{ ++// assert(document) // Non-NULL document object is expected. ++// ++// if (index > 0 && document.nodes.start + index <= document.nodes.top) { ++// return document.nodes.start + index - 1 ++// } ++// return NULL ++//} ++// ++///** ++// * Get the root object. ++// */ ++// ++//YAML_DECLARE(yaml_node_t *) ++//yaml_document_get_root_node(document *yaml_document_t) ++//{ ++// assert(document) // Non-NULL document object is expected. ++// ++// if (document.nodes.top != document.nodes.start) { ++// return document.nodes.start ++// } ++// return NULL ++//} ++// ++///* ++// * Add a scalar node to a document. ++// */ ++// ++//YAML_DECLARE(int) ++//yaml_document_add_scalar(document *yaml_document_t, ++// tag *yaml_char_t, value *yaml_char_t, length int, ++// style yaml_scalar_style_t) ++//{ ++// struct { ++// error yaml_error_type_t ++// } context ++// mark yaml_mark_t = { 0, 0, 0 } ++// tag_copy *yaml_char_t = NULL ++// value_copy *yaml_char_t = NULL ++// node yaml_node_t ++// ++// assert(document) // Non-NULL document object is expected. ++// assert(value) // Non-NULL value is expected. ++// ++// if (!tag) { ++// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG ++// } ++// ++// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error ++// tag_copy = yaml_strdup(tag) ++// if (!tag_copy) goto error ++// ++// if (length < 0) { ++// length = strlen((char *)value) ++// } ++// ++// if (!yaml_check_utf8(value, length)) goto error ++// value_copy = yaml_malloc(length+1) ++// if (!value_copy) goto error ++// memcpy(value_copy, value, length) ++// value_copy[length] = '\0' ++// ++// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) ++// if (!PUSH(&context, document.nodes, node)) goto error ++// ++// return document.nodes.top - document.nodes.start ++// ++//error: ++// yaml_free(tag_copy) ++// yaml_free(value_copy) ++// ++// return 0 ++//} ++// ++///* ++// * Add a sequence node to a document. ++// */ ++// ++//YAML_DECLARE(int) ++//yaml_document_add_sequence(document *yaml_document_t, ++// tag *yaml_char_t, style yaml_sequence_style_t) ++//{ ++// struct { ++// error yaml_error_type_t ++// } context ++// mark yaml_mark_t = { 0, 0, 0 } ++// tag_copy *yaml_char_t = NULL ++// struct { ++// start *yaml_node_item_t ++// end *yaml_node_item_t ++// top *yaml_node_item_t ++// } items = { NULL, NULL, NULL } ++// node yaml_node_t ++// ++// assert(document) // Non-NULL document object is expected. 
++// ++// if (!tag) { ++// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG ++// } ++// ++// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error ++// tag_copy = yaml_strdup(tag) ++// if (!tag_copy) goto error ++// ++// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error ++// ++// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, ++// style, mark, mark) ++// if (!PUSH(&context, document.nodes, node)) goto error ++// ++// return document.nodes.top - document.nodes.start ++// ++//error: ++// STACK_DEL(&context, items) ++// yaml_free(tag_copy) ++// ++// return 0 ++//} ++// ++///* ++// * Add a mapping node to a document. ++// */ ++// ++//YAML_DECLARE(int) ++//yaml_document_add_mapping(document *yaml_document_t, ++// tag *yaml_char_t, style yaml_mapping_style_t) ++//{ ++// struct { ++// error yaml_error_type_t ++// } context ++// mark yaml_mark_t = { 0, 0, 0 } ++// tag_copy *yaml_char_t = NULL ++// struct { ++// start *yaml_node_pair_t ++// end *yaml_node_pair_t ++// top *yaml_node_pair_t ++// } pairs = { NULL, NULL, NULL } ++// node yaml_node_t ++// ++// assert(document) // Non-NULL document object is expected. ++// ++// if (!tag) { ++// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG ++// } ++// ++// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error ++// tag_copy = yaml_strdup(tag) ++// if (!tag_copy) goto error ++// ++// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error ++// ++// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, ++// style, mark, mark) ++// if (!PUSH(&context, document.nodes, node)) goto error ++// ++// return document.nodes.top - document.nodes.start ++// ++//error: ++// STACK_DEL(&context, pairs) ++// yaml_free(tag_copy) ++// ++// return 0 ++//} ++// ++///* ++// * Append an item to a sequence node. ++// */ ++// ++//YAML_DECLARE(int) ++//yaml_document_append_sequence_item(document *yaml_document_t, ++// sequence int, item int) ++//{ ++// struct { ++// error yaml_error_type_t ++// } context ++// ++// assert(document) // Non-NULL document is required. ++// assert(sequence > 0 ++// && document.nodes.start + sequence <= document.nodes.top) ++// // Valid sequence id is required. ++// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) ++// // A sequence node is required. ++// assert(item > 0 && document.nodes.start + item <= document.nodes.top) ++// // Valid item id is required. ++// ++// if (!PUSH(&context, ++// document.nodes.start[sequence-1].data.sequence.items, item)) ++// return 0 ++// ++// return 1 ++//} ++// ++///* ++// * Append a pair of a key and a value to a mapping node. ++// */ ++// ++//YAML_DECLARE(int) ++//yaml_document_append_mapping_pair(document *yaml_document_t, ++// mapping int, key int, value int) ++//{ ++// struct { ++// error yaml_error_type_t ++// } context ++// ++// pair yaml_node_pair_t ++// ++// assert(document) // Non-NULL document is required. ++// assert(mapping > 0 ++// && document.nodes.start + mapping <= document.nodes.top) ++// // Valid mapping id is required. ++// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) ++// // A mapping node is required. ++// assert(key > 0 && document.nodes.start + key <= document.nodes.top) ++// // Valid key id is required. ++// assert(value > 0 && document.nodes.start + value <= document.nodes.top) ++// // Valid value id is required. 
++// ++// pair.key = key ++// pair.value = value ++// ++// if (!PUSH(&context, ++// document.nodes.start[mapping-1].data.mapping.pairs, pair)) ++// return 0 ++// ++// return 1 ++//} ++// ++// +diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go +new file mode 100644 +index 000000000000..0173b6982e84 +--- /dev/null ++++ b/vendor/gopkg.in/yaml.v3/decode.go +@@ -0,0 +1,1000 @@ ++// ++// Copyright (c) 2011-2019 Canonical Ltd ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package yaml ++ ++import ( ++ "encoding" ++ "encoding/base64" ++ "fmt" ++ "io" ++ "math" ++ "reflect" ++ "strconv" ++ "time" ++) ++ ++// ---------------------------------------------------------------------------- ++// Parser, produces a node tree out of a libyaml event stream. ++ ++type parser struct { ++ parser yaml_parser_t ++ event yaml_event_t ++ doc *Node ++ anchors map[string]*Node ++ doneInit bool ++ textless bool ++} ++ ++func newParser(b []byte) *parser { ++ p := parser{} ++ if !yaml_parser_initialize(&p.parser) { ++ panic("failed to initialize YAML emitter") ++ } ++ if len(b) == 0 { ++ b = []byte{'\n'} ++ } ++ yaml_parser_set_input_string(&p.parser, b) ++ return &p ++} ++ ++func newParserFromReader(r io.Reader) *parser { ++ p := parser{} ++ if !yaml_parser_initialize(&p.parser) { ++ panic("failed to initialize YAML emitter") ++ } ++ yaml_parser_set_input_reader(&p.parser, r) ++ return &p ++} ++ ++func (p *parser) init() { ++ if p.doneInit { ++ return ++ } ++ p.anchors = make(map[string]*Node) ++ p.expect(yaml_STREAM_START_EVENT) ++ p.doneInit = true ++} ++ ++func (p *parser) destroy() { ++ if p.event.typ != yaml_NO_EVENT { ++ yaml_event_delete(&p.event) ++ } ++ yaml_parser_delete(&p.parser) ++} ++ ++// expect consumes an event from the event stream and ++// checks that it's of the expected type. ++func (p *parser) expect(e yaml_event_type_t) { ++ if p.event.typ == yaml_NO_EVENT { ++ if !yaml_parser_parse(&p.parser, &p.event) { ++ p.fail() ++ } ++ } ++ if p.event.typ == yaml_STREAM_END_EVENT { ++ failf("attempted to go past the end of stream; corrupted value?") ++ } ++ if p.event.typ != e { ++ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) ++ p.fail() ++ } ++ yaml_event_delete(&p.event) ++ p.event.typ = yaml_NO_EVENT ++} ++ ++// peek peeks at the next event in the event stream, ++// puts the results into p.event and returns the event type. ++func (p *parser) peek() yaml_event_type_t { ++ if p.event.typ != yaml_NO_EVENT { ++ return p.event.typ ++ } ++ // It's curious choice from the underlying API to generally return a ++ // positive result on success, but on this case return true in an error ++ // scenario. This was the source of bugs in the past (issue #666). 
++ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { ++ p.fail() ++ } ++ return p.event.typ ++} ++ ++func (p *parser) fail() { ++ var where string ++ var line int ++ if p.parser.context_mark.line != 0 { ++ line = p.parser.context_mark.line ++ // Scanner errors don't iterate line before returning error ++ if p.parser.error == yaml_SCANNER_ERROR { ++ line++ ++ } ++ } else if p.parser.problem_mark.line != 0 { ++ line = p.parser.problem_mark.line ++ // Scanner errors don't iterate line before returning error ++ if p.parser.error == yaml_SCANNER_ERROR { ++ line++ ++ } ++ } ++ if line != 0 { ++ where = "line " + strconv.Itoa(line) + ": " ++ } ++ var msg string ++ if len(p.parser.problem) > 0 { ++ msg = p.parser.problem ++ } else { ++ msg = "unknown problem parsing YAML content" ++ } ++ failf("%s%s", where, msg) ++} ++ ++func (p *parser) anchor(n *Node, anchor []byte) { ++ if anchor != nil { ++ n.Anchor = string(anchor) ++ p.anchors[n.Anchor] = n ++ } ++} ++ ++func (p *parser) parse() *Node { ++ p.init() ++ switch p.peek() { ++ case yaml_SCALAR_EVENT: ++ return p.scalar() ++ case yaml_ALIAS_EVENT: ++ return p.alias() ++ case yaml_MAPPING_START_EVENT: ++ return p.mapping() ++ case yaml_SEQUENCE_START_EVENT: ++ return p.sequence() ++ case yaml_DOCUMENT_START_EVENT: ++ return p.document() ++ case yaml_STREAM_END_EVENT: ++ // Happens when attempting to decode an empty buffer. ++ return nil ++ case yaml_TAIL_COMMENT_EVENT: ++ panic("internal error: unexpected tail comment event (please report)") ++ default: ++ panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) ++ } ++} ++ ++func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { ++ var style Style ++ if tag != "" && tag != "!" 
{ ++ tag = shortTag(tag) ++ style = TaggedStyle ++ } else if defaultTag != "" { ++ tag = defaultTag ++ } else if kind == ScalarNode { ++ tag, _ = resolve("", value) ++ } ++ n := &Node{ ++ Kind: kind, ++ Tag: tag, ++ Value: value, ++ Style: style, ++ } ++ if !p.textless { ++ n.Line = p.event.start_mark.line + 1 ++ n.Column = p.event.start_mark.column + 1 ++ n.HeadComment = string(p.event.head_comment) ++ n.LineComment = string(p.event.line_comment) ++ n.FootComment = string(p.event.foot_comment) ++ } ++ return n ++} ++ ++func (p *parser) parseChild(parent *Node) *Node { ++ child := p.parse() ++ parent.Content = append(parent.Content, child) ++ return child ++} ++ ++func (p *parser) document() *Node { ++ n := p.node(DocumentNode, "", "", "") ++ p.doc = n ++ p.expect(yaml_DOCUMENT_START_EVENT) ++ p.parseChild(n) ++ if p.peek() == yaml_DOCUMENT_END_EVENT { ++ n.FootComment = string(p.event.foot_comment) ++ } ++ p.expect(yaml_DOCUMENT_END_EVENT) ++ return n ++} ++ ++func (p *parser) alias() *Node { ++ n := p.node(AliasNode, "", "", string(p.event.anchor)) ++ n.Alias = p.anchors[n.Value] ++ if n.Alias == nil { ++ failf("unknown anchor '%s' referenced", n.Value) ++ } ++ p.expect(yaml_ALIAS_EVENT) ++ return n ++} ++ ++func (p *parser) scalar() *Node { ++ var parsedStyle = p.event.scalar_style() ++ var nodeStyle Style ++ switch { ++ case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: ++ nodeStyle = DoubleQuotedStyle ++ case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: ++ nodeStyle = SingleQuotedStyle ++ case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: ++ nodeStyle = LiteralStyle ++ case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: ++ nodeStyle = FoldedStyle ++ } ++ var nodeValue = string(p.event.value) ++ var nodeTag = string(p.event.tag) ++ var defaultTag string ++ if nodeStyle == 0 { ++ if nodeValue == "<<" { ++ defaultTag = mergeTag ++ } ++ } else { ++ defaultTag = strTag ++ } ++ n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) ++ n.Style |= nodeStyle ++ p.anchor(n, p.event.anchor) ++ p.expect(yaml_SCALAR_EVENT) ++ return n ++} ++ ++func (p *parser) sequence() *Node { ++ n := p.node(SequenceNode, seqTag, string(p.event.tag), "") ++ if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { ++ n.Style |= FlowStyle ++ } ++ p.anchor(n, p.event.anchor) ++ p.expect(yaml_SEQUENCE_START_EVENT) ++ for p.peek() != yaml_SEQUENCE_END_EVENT { ++ p.parseChild(n) ++ } ++ n.LineComment = string(p.event.line_comment) ++ n.FootComment = string(p.event.foot_comment) ++ p.expect(yaml_SEQUENCE_END_EVENT) ++ return n ++} ++ ++func (p *parser) mapping() *Node { ++ n := p.node(MappingNode, mapTag, string(p.event.tag), "") ++ block := true ++ if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { ++ block = false ++ n.Style |= FlowStyle ++ } ++ p.anchor(n, p.event.anchor) ++ p.expect(yaml_MAPPING_START_EVENT) ++ for p.peek() != yaml_MAPPING_END_EVENT { ++ k := p.parseChild(n) ++ if block && k.FootComment != "" { ++ // Must be a foot comment for the prior value when being dedented. 
++ if len(n.Content) > 2 { ++ n.Content[len(n.Content)-3].FootComment = k.FootComment ++ k.FootComment = "" ++ } ++ } ++ v := p.parseChild(n) ++ if k.FootComment == "" && v.FootComment != "" { ++ k.FootComment = v.FootComment ++ v.FootComment = "" ++ } ++ if p.peek() == yaml_TAIL_COMMENT_EVENT { ++ if k.FootComment == "" { ++ k.FootComment = string(p.event.foot_comment) ++ } ++ p.expect(yaml_TAIL_COMMENT_EVENT) ++ } ++ } ++ n.LineComment = string(p.event.line_comment) ++ n.FootComment = string(p.event.foot_comment) ++ if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { ++ n.Content[len(n.Content)-2].FootComment = n.FootComment ++ n.FootComment = "" ++ } ++ p.expect(yaml_MAPPING_END_EVENT) ++ return n ++} ++ ++// ---------------------------------------------------------------------------- ++// Decoder, unmarshals a node into a provided value. ++ ++type decoder struct { ++ doc *Node ++ aliases map[*Node]bool ++ terrors []string ++ ++ stringMapType reflect.Type ++ generalMapType reflect.Type ++ ++ knownFields bool ++ uniqueKeys bool ++ decodeCount int ++ aliasCount int ++ aliasDepth int ++ ++ mergedFields map[interface{}]bool ++} ++ ++var ( ++ nodeType = reflect.TypeOf(Node{}) ++ durationType = reflect.TypeOf(time.Duration(0)) ++ stringMapType = reflect.TypeOf(map[string]interface{}{}) ++ generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) ++ ifaceType = generalMapType.Elem() ++ timeType = reflect.TypeOf(time.Time{}) ++ ptrTimeType = reflect.TypeOf(&time.Time{}) ++) ++ ++func newDecoder() *decoder { ++ d := &decoder{ ++ stringMapType: stringMapType, ++ generalMapType: generalMapType, ++ uniqueKeys: true, ++ } ++ d.aliases = make(map[*Node]bool) ++ return d ++} ++ ++func (d *decoder) terror(n *Node, tag string, out reflect.Value) { ++ if n.Tag != "" { ++ tag = n.Tag ++ } ++ value := n.Value ++ if tag != seqTag && tag != mapTag { ++ if len(value) > 10 { ++ value = " `" + value[:7] + "...`" ++ } else { ++ value = " `" + value + "`" ++ } ++ } ++ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) ++} ++ ++func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { ++ err := u.UnmarshalYAML(n) ++ if e, ok := err.(*TypeError); ok { ++ d.terrors = append(d.terrors, e.Errors...) ++ return false ++ } ++ if err != nil { ++ fail(err) ++ } ++ return true ++} ++ ++func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { ++ terrlen := len(d.terrors) ++ err := u.UnmarshalYAML(func(v interface{}) (err error) { ++ defer handleErr(&err) ++ d.unmarshal(n, reflect.ValueOf(v)) ++ if len(d.terrors) > terrlen { ++ issues := d.terrors[terrlen:] ++ d.terrors = d.terrors[:terrlen] ++ return &TypeError{issues} ++ } ++ return nil ++ }) ++ if e, ok := err.(*TypeError); ok { ++ d.terrors = append(d.terrors, e.Errors...) ++ return false ++ } ++ if err != nil { ++ fail(err) ++ } ++ return true ++} ++ ++// d.prepare initializes and dereferences pointers and calls UnmarshalYAML ++// if a value is found to implement it. ++// It returns the initialized and dereferenced out value, whether ++// unmarshalling was already done by UnmarshalYAML, and if so whether ++// its types unmarshalled appropriately. ++// ++// If n holds a null value, prepare returns before doing anything. 
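++//
++// For example (editorial sketch, hypothetical type): a value whose pointer
++// implements Unmarshaler is handed its raw *Node here instead of being
++// decoded structurally:
++//
++//	type Port int
++//
++//	func (p *Port) UnmarshalYAML(value *Node) error {
++//		// custom parsing of value.Value would go here
++//		return nil
++//	}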
++func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { ++ if n.ShortTag() == nullTag { ++ return out, false, false ++ } ++ again := true ++ for again { ++ again = false ++ if out.Kind() == reflect.Ptr { ++ if out.IsNil() { ++ out.Set(reflect.New(out.Type().Elem())) ++ } ++ out = out.Elem() ++ again = true ++ } ++ if out.CanAddr() { ++ outi := out.Addr().Interface() ++ if u, ok := outi.(Unmarshaler); ok { ++ good = d.callUnmarshaler(n, u) ++ return out, true, good ++ } ++ if u, ok := outi.(obsoleteUnmarshaler); ok { ++ good = d.callObsoleteUnmarshaler(n, u) ++ return out, true, good ++ } ++ } ++ } ++ return out, false, false ++} ++ ++func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { ++ if n.ShortTag() == nullTag { ++ return reflect.Value{} ++ } ++ for _, num := range index { ++ for { ++ if v.Kind() == reflect.Ptr { ++ if v.IsNil() { ++ v.Set(reflect.New(v.Type().Elem())) ++ } ++ v = v.Elem() ++ continue ++ } ++ break ++ } ++ v = v.Field(num) ++ } ++ return v ++} ++ ++const ( ++ // 400,000 decode operations is ~500kb of dense object declarations, or ++ // ~5kb of dense object declarations with 10000% alias expansion ++ alias_ratio_range_low = 400000 ++ ++ // 4,000,000 decode operations is ~5MB of dense object declarations, or ++ // ~4.5MB of dense object declarations with 10% alias expansion ++ alias_ratio_range_high = 4000000 ++ ++ // alias_ratio_range is the range over which we scale allowed alias ratios ++ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) ++) ++ ++func allowedAliasRatio(decodeCount int) float64 { ++ switch { ++ case decodeCount <= alias_ratio_range_low: ++ // allow 99% to come from alias expansion for small-to-medium documents ++ return 0.99 ++ case decodeCount >= alias_ratio_range_high: ++ // allow 10% to come from alias expansion for very large documents ++ return 0.10 ++ default: ++ // scale smoothly from 99% down to 10% over the range. ++ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. ++ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). 
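++		// Worked example (editorial note): at decodeCount = 2,200,000, the
++		// midpoint of the range, this evaluates to
++		// 0.99 - 0.89*(1,800,000/3,600,000) = 0.545, i.e. roughly half of
++		// all decode operations may still come from alias expansion.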
++ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) ++ } ++} ++ ++func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { ++ d.decodeCount++ ++ if d.aliasDepth > 0 { ++ d.aliasCount++ ++ } ++ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { ++ failf("document contains excessive aliasing") ++ } ++ if out.Type() == nodeType { ++ out.Set(reflect.ValueOf(n).Elem()) ++ return true ++ } ++ switch n.Kind { ++ case DocumentNode: ++ return d.document(n, out) ++ case AliasNode: ++ return d.alias(n, out) ++ } ++ out, unmarshaled, good := d.prepare(n, out) ++ if unmarshaled { ++ return good ++ } ++ switch n.Kind { ++ case ScalarNode: ++ good = d.scalar(n, out) ++ case MappingNode: ++ good = d.mapping(n, out) ++ case SequenceNode: ++ good = d.sequence(n, out) ++ case 0: ++ if n.IsZero() { ++ return d.null(out) ++ } ++ fallthrough ++ default: ++ failf("cannot decode node with unknown kind %d", n.Kind) ++ } ++ return good ++} ++ ++func (d *decoder) document(n *Node, out reflect.Value) (good bool) { ++ if len(n.Content) == 1 { ++ d.doc = n ++ d.unmarshal(n.Content[0], out) ++ return true ++ } ++ return false ++} ++ ++func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { ++ if d.aliases[n] { ++ // TODO this could actually be allowed in some circumstances. ++ failf("anchor '%s' value contains itself", n.Value) ++ } ++ d.aliases[n] = true ++ d.aliasDepth++ ++ good = d.unmarshal(n.Alias, out) ++ d.aliasDepth-- ++ delete(d.aliases, n) ++ return good ++} ++ ++var zeroValue reflect.Value ++ ++func resetMap(out reflect.Value) { ++ for _, k := range out.MapKeys() { ++ out.SetMapIndex(k, zeroValue) ++ } ++} ++ ++func (d *decoder) null(out reflect.Value) bool { ++ if out.CanAddr() { ++ switch out.Kind() { ++ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: ++ out.Set(reflect.Zero(out.Type())) ++ return true ++ } ++ } ++ return false ++} ++ ++func (d *decoder) scalar(n *Node, out reflect.Value) bool { ++ var tag string ++ var resolved interface{} ++ if n.indicatedString() { ++ tag = strTag ++ resolved = n.Value ++ } else { ++ tag, resolved = resolve(n.Tag, n.Value) ++ if tag == binaryTag { ++ data, err := base64.StdEncoding.DecodeString(resolved.(string)) ++ if err != nil { ++ failf("!!binary value contains invalid base64 data") ++ } ++ resolved = string(data) ++ } ++ } ++ if resolved == nil { ++ return d.null(out) ++ } ++ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { ++ // We've resolved to exactly the type we want, so use that. ++ out.Set(resolvedv) ++ return true ++ } ++ // Perhaps we can use the value as a TextUnmarshaler to ++ // set its value. ++ if out.CanAddr() { ++ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) ++ if ok { ++ var text []byte ++ if tag == binaryTag { ++ text = []byte(resolved.(string)) ++ } else { ++ // We let any value be unmarshaled into TextUnmarshaler. ++ // That might be more lax than we'd like, but the ++ // TextUnmarshaler itself should bowl out any dubious values. 
++ text = []byte(n.Value) ++ } ++ err := u.UnmarshalText(text) ++ if err != nil { ++ fail(err) ++ } ++ return true ++ } ++ } ++ switch out.Kind() { ++ case reflect.String: ++ if tag == binaryTag { ++ out.SetString(resolved.(string)) ++ return true ++ } ++ out.SetString(n.Value) ++ return true ++ case reflect.Interface: ++ out.Set(reflect.ValueOf(resolved)) ++ return true ++ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: ++ // This used to work in v2, but it's very unfriendly. ++ isDuration := out.Type() == durationType ++ ++ switch resolved := resolved.(type) { ++ case int: ++ if !isDuration && !out.OverflowInt(int64(resolved)) { ++ out.SetInt(int64(resolved)) ++ return true ++ } ++ case int64: ++ if !isDuration && !out.OverflowInt(resolved) { ++ out.SetInt(resolved) ++ return true ++ } ++ case uint64: ++ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { ++ out.SetInt(int64(resolved)) ++ return true ++ } ++ case float64: ++ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { ++ out.SetInt(int64(resolved)) ++ return true ++ } ++ case string: ++ if out.Type() == durationType { ++ d, err := time.ParseDuration(resolved) ++ if err == nil { ++ out.SetInt(int64(d)) ++ return true ++ } ++ } ++ } ++ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: ++ switch resolved := resolved.(type) { ++ case int: ++ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { ++ out.SetUint(uint64(resolved)) ++ return true ++ } ++ case int64: ++ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { ++ out.SetUint(uint64(resolved)) ++ return true ++ } ++ case uint64: ++ if !out.OverflowUint(uint64(resolved)) { ++ out.SetUint(uint64(resolved)) ++ return true ++ } ++ case float64: ++ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { ++ out.SetUint(uint64(resolved)) ++ return true ++ } ++ } ++ case reflect.Bool: ++ switch resolved := resolved.(type) { ++ case bool: ++ out.SetBool(resolved) ++ return true ++ case string: ++ // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). ++ // It only works if explicitly attempting to unmarshal into a typed bool value. 
++ switch resolved { ++ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": ++ out.SetBool(true) ++ return true ++ case "n", "N", "no", "No", "NO", "off", "Off", "OFF": ++ out.SetBool(false) ++ return true ++ } ++ } ++ case reflect.Float32, reflect.Float64: ++ switch resolved := resolved.(type) { ++ case int: ++ out.SetFloat(float64(resolved)) ++ return true ++ case int64: ++ out.SetFloat(float64(resolved)) ++ return true ++ case uint64: ++ out.SetFloat(float64(resolved)) ++ return true ++ case float64: ++ out.SetFloat(resolved) ++ return true ++ } ++ case reflect.Struct: ++ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { ++ out.Set(resolvedv) ++ return true ++ } ++ case reflect.Ptr: ++ panic("yaml internal error: please report the issue") ++ } ++ d.terror(n, tag, out) ++ return false ++} ++ ++func settableValueOf(i interface{}) reflect.Value { ++ v := reflect.ValueOf(i) ++ sv := reflect.New(v.Type()).Elem() ++ sv.Set(v) ++ return sv ++} ++ ++func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { ++ l := len(n.Content) ++ ++ var iface reflect.Value ++ switch out.Kind() { ++ case reflect.Slice: ++ out.Set(reflect.MakeSlice(out.Type(), l, l)) ++ case reflect.Array: ++ if l != out.Len() { ++ failf("invalid array: want %d elements but got %d", out.Len(), l) ++ } ++ case reflect.Interface: ++ // No type hints. Will have to use a generic sequence. ++ iface = out ++ out = settableValueOf(make([]interface{}, l)) ++ default: ++ d.terror(n, seqTag, out) ++ return false ++ } ++ et := out.Type().Elem() ++ ++ j := 0 ++ for i := 0; i < l; i++ { ++ e := reflect.New(et).Elem() ++ if ok := d.unmarshal(n.Content[i], e); ok { ++ out.Index(j).Set(e) ++ j++ ++ } ++ } ++ if out.Kind() != reflect.Array { ++ out.Set(out.Slice(0, j)) ++ } ++ if iface.IsValid() { ++ iface.Set(out) ++ } ++ return true ++} ++ ++func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { ++ l := len(n.Content) ++ if d.uniqueKeys { ++ nerrs := len(d.terrors) ++ for i := 0; i < l; i += 2 { ++ ni := n.Content[i] ++ for j := i + 2; j < l; j += 2 { ++ nj := n.Content[j] ++ if ni.Kind == nj.Kind && ni.Value == nj.Value { ++ d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) ++ } ++ } ++ } ++ if len(d.terrors) > nerrs { ++ return false ++ } ++ } ++ switch out.Kind() { ++ case reflect.Struct: ++ return d.mappingStruct(n, out) ++ case reflect.Map: ++ // okay ++ case reflect.Interface: ++ iface := out ++ if isStringMap(n) { ++ out = reflect.MakeMap(d.stringMapType) ++ } else { ++ out = reflect.MakeMap(d.generalMapType) ++ } ++ iface.Set(out) ++ default: ++ d.terror(n, mapTag, out) ++ return false ++ } ++ ++ outt := out.Type() ++ kt := outt.Key() ++ et := outt.Elem() ++ ++ stringMapType := d.stringMapType ++ generalMapType := d.generalMapType ++ if outt.Elem() == ifaceType { ++ if outt.Key().Kind() == reflect.String { ++ d.stringMapType = outt ++ } else if outt.Key() == ifaceType { ++ d.generalMapType = outt ++ } ++ } ++ ++ mergedFields := d.mergedFields ++ d.mergedFields = nil ++ ++ var mergeNode *Node ++ ++ mapIsNew := false ++ if out.IsNil() { ++ out.Set(reflect.MakeMap(outt)) ++ mapIsNew = true ++ } ++ for i := 0; i < l; i += 2 { ++ if isMerge(n.Content[i]) { ++ mergeNode = n.Content[i+1] ++ continue ++ } ++ k := reflect.New(kt).Elem() ++ if d.unmarshal(n.Content[i], k) { ++ if mergedFields != nil { ++ ki := k.Interface() ++ if mergedFields[ki] { ++ continue ++ } ++ mergedFields[ki] = true ++ } ++ kkind := k.Kind() 
++ if kkind == reflect.Interface { ++ kkind = k.Elem().Kind() ++ } ++ if kkind == reflect.Map || kkind == reflect.Slice { ++ failf("invalid map key: %#v", k.Interface()) ++ } ++ e := reflect.New(et).Elem() ++ if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { ++ out.SetMapIndex(k, e) ++ } ++ } ++ } ++ ++ d.mergedFields = mergedFields ++ if mergeNode != nil { ++ d.merge(n, mergeNode, out) ++ } ++ ++ d.stringMapType = stringMapType ++ d.generalMapType = generalMapType ++ return true ++} ++ ++func isStringMap(n *Node) bool { ++ if n.Kind != MappingNode { ++ return false ++ } ++ l := len(n.Content) ++ for i := 0; i < l; i += 2 { ++ shortTag := n.Content[i].ShortTag() ++ if shortTag != strTag && shortTag != mergeTag { ++ return false ++ } ++ } ++ return true ++} ++ ++func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { ++ sinfo, err := getStructInfo(out.Type()) ++ if err != nil { ++ panic(err) ++ } ++ ++ var inlineMap reflect.Value ++ var elemType reflect.Type ++ if sinfo.InlineMap != -1 { ++ inlineMap = out.Field(sinfo.InlineMap) ++ elemType = inlineMap.Type().Elem() ++ } ++ ++ for _, index := range sinfo.InlineUnmarshalers { ++ field := d.fieldByIndex(n, out, index) ++ d.prepare(n, field) ++ } ++ ++ mergedFields := d.mergedFields ++ d.mergedFields = nil ++ var mergeNode *Node ++ var doneFields []bool ++ if d.uniqueKeys { ++ doneFields = make([]bool, len(sinfo.FieldsList)) ++ } ++ name := settableValueOf("") ++ l := len(n.Content) ++ for i := 0; i < l; i += 2 { ++ ni := n.Content[i] ++ if isMerge(ni) { ++ mergeNode = n.Content[i+1] ++ continue ++ } ++ if !d.unmarshal(ni, name) { ++ continue ++ } ++ sname := name.String() ++ if mergedFields != nil { ++ if mergedFields[sname] { ++ continue ++ } ++ mergedFields[sname] = true ++ } ++ if info, ok := sinfo.FieldsMap[sname]; ok { ++ if d.uniqueKeys { ++ if doneFields[info.Id] { ++ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) ++ continue ++ } ++ doneFields[info.Id] = true ++ } ++ var field reflect.Value ++ if info.Inline == nil { ++ field = out.Field(info.Num) ++ } else { ++ field = d.fieldByIndex(n, out, info.Inline) ++ } ++ d.unmarshal(n.Content[i+1], field) ++ } else if sinfo.InlineMap != -1 { ++ if inlineMap.IsNil() { ++ inlineMap.Set(reflect.MakeMap(inlineMap.Type())) ++ } ++ value := reflect.New(elemType).Elem() ++ d.unmarshal(n.Content[i+1], value) ++ inlineMap.SetMapIndex(name, value) ++ } else if d.knownFields { ++ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) ++ } ++ } ++ ++ d.mergedFields = mergedFields ++ if mergeNode != nil { ++ d.merge(n, mergeNode, out) ++ } ++ return true ++} ++ ++func failWantMap() { ++ failf("map merge requires map or sequence of maps as the value") ++} ++ ++func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { ++ mergedFields := d.mergedFields ++ if mergedFields == nil { ++ d.mergedFields = make(map[interface{}]bool) ++ for i := 0; i < len(parent.Content); i += 2 { ++ k := reflect.New(ifaceType).Elem() ++ if d.unmarshal(parent.Content[i], k) { ++ d.mergedFields[k.Interface()] = true ++ } ++ } ++ } ++ ++ switch merge.Kind { ++ case MappingNode: ++ d.unmarshal(merge, out) ++ case AliasNode: ++ if merge.Alias != nil && merge.Alias.Kind != MappingNode { ++ failWantMap() ++ } ++ d.unmarshal(merge, out) ++ case SequenceNode: ++ for i := 0; i < len(merge.Content); 
i++ { ++ ni := merge.Content[i] ++ if ni.Kind == AliasNode { ++ if ni.Alias != nil && ni.Alias.Kind != MappingNode { ++ failWantMap() ++ } ++ } else if ni.Kind != MappingNode { ++ failWantMap() ++ } ++ d.unmarshal(ni, out) ++ } ++ default: ++ failWantMap() ++ } ++ ++ d.mergedFields = mergedFields ++} ++ ++func isMerge(n *Node) bool { ++ return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) ++} +diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go +new file mode 100644 +index 000000000000..0f47c9ca8add +--- /dev/null ++++ b/vendor/gopkg.in/yaml.v3/emitterc.go +@@ -0,0 +1,2020 @@ ++// ++// Copyright (c) 2011-2019 Canonical Ltd ++// Copyright (c) 2006-2010 Kirill Simonov ++// ++// Permission is hereby granted, free of charge, to any person obtaining a copy of ++// this software and associated documentation files (the "Software"), to deal in ++// the Software without restriction, including without limitation the rights to ++// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies ++// of the Software, and to permit persons to whom the Software is furnished to do ++// so, subject to the following conditions: ++// ++// The above copyright notice and this permission notice shall be included in all ++// copies or substantial portions of the Software. ++// ++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++// SOFTWARE. ++ ++package yaml ++ ++import ( ++ "bytes" ++ "fmt" ++) ++ ++// Flush the buffer if needed. ++func flush(emitter *yaml_emitter_t) bool { ++ if emitter.buffer_pos+5 >= len(emitter.buffer) { ++ return yaml_emitter_flush(emitter) ++ } ++ return true ++} ++ ++// Put a character to the output buffer. ++func put(emitter *yaml_emitter_t, value byte) bool { ++ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { ++ return false ++ } ++ emitter.buffer[emitter.buffer_pos] = value ++ emitter.buffer_pos++ ++ emitter.column++ ++ return true ++} ++ ++// Put a line break to the output buffer. ++func put_break(emitter *yaml_emitter_t) bool { ++ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { ++ return false ++ } ++ switch emitter.line_break { ++ case yaml_CR_BREAK: ++ emitter.buffer[emitter.buffer_pos] = '\r' ++ emitter.buffer_pos += 1 ++ case yaml_LN_BREAK: ++ emitter.buffer[emitter.buffer_pos] = '\n' ++ emitter.buffer_pos += 1 ++ case yaml_CRLN_BREAK: ++ emitter.buffer[emitter.buffer_pos+0] = '\r' ++ emitter.buffer[emitter.buffer_pos+1] = '\n' ++ emitter.buffer_pos += 2 ++ default: ++ panic("unknown line break setting") ++ } ++ if emitter.column == 0 { ++ emitter.space_above = true ++ } ++ emitter.column = 0 ++ emitter.line++ ++ // [Go] Do this here and below and drop from everywhere else (see commented lines). ++ emitter.indention = true ++ return true ++} ++ ++// Copy a character from a string into buffer. 
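++// Note (editorial): the copy advances the buffer position by the character's
++// full UTF-8 width while emitter.column increases by only one, so a
++// two-byte character such as "é" (0xC3 0xA9) still counts as a single column.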
++func write(emitter *yaml_emitter_t, s []byte, i *int) bool { ++ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { ++ return false ++ } ++ p := emitter.buffer_pos ++ w := width(s[*i]) ++ switch w { ++ case 4: ++ emitter.buffer[p+3] = s[*i+3] ++ fallthrough ++ case 3: ++ emitter.buffer[p+2] = s[*i+2] ++ fallthrough ++ case 2: ++ emitter.buffer[p+1] = s[*i+1] ++ fallthrough ++ case 1: ++ emitter.buffer[p+0] = s[*i+0] ++ default: ++ panic("unknown character width") ++ } ++ emitter.column++ ++ emitter.buffer_pos += w ++ *i += w ++ return true ++} ++ ++// Write a whole string into buffer. ++func write_all(emitter *yaml_emitter_t, s []byte) bool { ++ for i := 0; i < len(s); { ++ if !write(emitter, s, &i) { ++ return false ++ } ++ } ++ return true ++} ++ ++// Copy a line break character from a string into buffer. ++func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { ++ if s[*i] == '\n' { ++ if !put_break(emitter) { ++ return false ++ } ++ *i++ ++ } else { ++ if !write(emitter, s, i) { ++ return false ++ } ++ if emitter.column == 0 { ++ emitter.space_above = true ++ } ++ emitter.column = 0 ++ emitter.line++ ++ // [Go] Do this here and above and drop from everywhere else (see commented lines). ++ emitter.indention = true ++ } ++ return true ++} ++ ++// Set an emitter error and return false. ++func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { ++ emitter.error = yaml_EMITTER_ERROR ++ emitter.problem = problem ++ return false ++} ++ ++// Emit an event. ++func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { ++ emitter.events = append(emitter.events, *event) ++ for !yaml_emitter_need_more_events(emitter) { ++ event := &emitter.events[emitter.events_head] ++ if !yaml_emitter_analyze_event(emitter, event) { ++ return false ++ } ++ if !yaml_emitter_state_machine(emitter, event) { ++ return false ++ } ++ yaml_event_delete(event) ++ emitter.events_head++ ++ } ++ return true ++} ++ ++// Check if we need to accumulate more events before emitting. ++// ++// We accumulate extra ++// - 1 event for DOCUMENT-START ++// - 2 events for SEQUENCE-START ++// - 3 events for MAPPING-START ++// ++func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { ++ if emitter.events_head == len(emitter.events) { ++ return true ++ } ++ var accumulate int ++ switch emitter.events[emitter.events_head].typ { ++ case yaml_DOCUMENT_START_EVENT: ++ accumulate = 1 ++ break ++ case yaml_SEQUENCE_START_EVENT: ++ accumulate = 2 ++ break ++ case yaml_MAPPING_START_EVENT: ++ accumulate = 3 ++ break ++ default: ++ return false ++ } ++ if len(emitter.events)-emitter.events_head > accumulate { ++ return false ++ } ++ var level int ++ for i := emitter.events_head; i < len(emitter.events); i++ { ++ switch emitter.events[i].typ { ++ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: ++ level++ ++ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: ++ level-- ++ } ++ if level == 0 { ++ return false ++ } ++ } ++ return true ++} ++ ++// Append a directive to the directives stack. 
++func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
++	for i := 0; i < len(emitter.tag_directives); i++ {
++		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
++			if allow_duplicates {
++				return true
++			}
++			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
++		}
++	}
++
++	// [Go] Do we actually need to copy this given garbage collection
++	// and the lack of deallocating destructors?
++	tag_copy := yaml_tag_directive_t{
++		handle: make([]byte, len(value.handle)),
++		prefix: make([]byte, len(value.prefix)),
++	}
++	copy(tag_copy.handle, value.handle)
++	copy(tag_copy.prefix, value.prefix)
++	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
++	return true
++}
++
++// Increase the indentation level.
++func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
++	emitter.indents = append(emitter.indents, emitter.indent)
++	if emitter.indent < 0 {
++		if flow {
++			emitter.indent = emitter.best_indent
++		} else {
++			emitter.indent = 0
++		}
++	} else if !indentless {
++		// [Go] This was changed so that indentations are more regular.
++		if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
++			// The first indent inside a sequence will just skip the "- " indicator.
++			emitter.indent += 2
++		} else {
++			// Everything else aligns to the chosen indentation.
++			emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent)
++		}
++	}
++	return true
++}
++
++// State dispatcher.
++func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
++	switch emitter.state {
++	default:
++	case yaml_EMIT_STREAM_START_STATE:
++		return yaml_emitter_emit_stream_start(emitter, event)
++
++	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
++		return yaml_emitter_emit_document_start(emitter, event, true)
++
++	case yaml_EMIT_DOCUMENT_START_STATE:
++		return yaml_emitter_emit_document_start(emitter, event, false)
++
++	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
++		return yaml_emitter_emit_document_content(emitter, event)
++
++	case yaml_EMIT_DOCUMENT_END_STATE:
++		return yaml_emitter_emit_document_end(emitter, event)
++
++	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
++		return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
++
++	case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
++		return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
++
++	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
++		return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
++
++	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
++		return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
++
++	case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
++		return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
++
++	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
++		return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
++
++	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
++		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
++
++	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
++		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
++
++	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
++		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
++
++	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
++		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
++
++	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
++		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
++
++	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
++		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
++
++	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
++		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
++
++	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
++		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
++
++	case yaml_EMIT_END_STATE:
++		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
++	}
++	panic("invalid emitter state")
++}
++
++// Expect STREAM-START.
++func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
++	if event.typ != yaml_STREAM_START_EVENT {
++		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
++	}
++	if emitter.encoding == yaml_ANY_ENCODING {
++		emitter.encoding = event.encoding
++		if emitter.encoding == yaml_ANY_ENCODING {
++			emitter.encoding = yaml_UTF8_ENCODING
++		}
++	}
++	if emitter.best_indent < 2 || emitter.best_indent > 9 {
++		emitter.best_indent = 2
++	}
++	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
++		emitter.best_width = 80
++	}
++	if emitter.best_width < 0 {
++		emitter.best_width = 1<<31 - 1
++	}
++	if emitter.line_break == yaml_ANY_BREAK {
++		emitter.line_break = yaml_LN_BREAK
++	}
++
++	emitter.indent = -1
++	emitter.line = 0
++	emitter.column = 0
++	emitter.whitespace = true
++	emitter.indention = true
++	emitter.space_above = true
++	emitter.foot_indent = -1
++
++	if emitter.encoding != yaml_UTF8_ENCODING {
++		if !yaml_emitter_write_bom(emitter) {
++			return false
++		}
++	}
++	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
++	return true
++}
++
++// Expect DOCUMENT-START or STREAM-END.
++func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { ++ ++ if event.typ == yaml_DOCUMENT_START_EVENT { ++ ++ if event.version_directive != nil { ++ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { ++ return false ++ } ++ } ++ ++ for i := 0; i < len(event.tag_directives); i++ { ++ tag_directive := &event.tag_directives[i] ++ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { ++ return false ++ } ++ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { ++ return false ++ } ++ } ++ ++ for i := 0; i < len(default_tag_directives); i++ { ++ tag_directive := &default_tag_directives[i] ++ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { ++ return false ++ } ++ } ++ ++ implicit := event.implicit ++ if !first || emitter.canonical { ++ implicit = false ++ } ++ ++ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { ++ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { ++ return false ++ } ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ ++ if event.version_directive != nil { ++ implicit = false ++ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { ++ return false ++ } ++ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { ++ return false ++ } ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ ++ if len(event.tag_directives) > 0 { ++ implicit = false ++ for i := 0; i < len(event.tag_directives); i++ { ++ tag_directive := &event.tag_directives[i] ++ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { ++ return false ++ } ++ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { ++ return false ++ } ++ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { ++ return false ++ } ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ } ++ ++ if yaml_emitter_check_empty_document(emitter) { ++ implicit = false ++ } ++ if !implicit { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { ++ return false ++ } ++ if emitter.canonical || true { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ } ++ ++ if len(emitter.head_comment) > 0 { ++ if !yaml_emitter_process_head_comment(emitter) { ++ return false ++ } ++ if !put_break(emitter) { ++ return false ++ } ++ } ++ ++ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE ++ return true ++ } ++ ++ if event.typ == yaml_STREAM_END_EVENT { ++ if emitter.open_ended { ++ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { ++ return false ++ } ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ if !yaml_emitter_flush(emitter) { ++ return false ++ } ++ emitter.state = yaml_EMIT_END_STATE ++ return true ++ } ++ ++ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") ++} ++ ++// Expect the root node. 
++func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { ++ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) ++ ++ if !yaml_emitter_process_head_comment(emitter) { ++ return false ++ } ++ if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { ++ return false ++ } ++ if !yaml_emitter_process_line_comment(emitter) { ++ return false ++ } ++ if !yaml_emitter_process_foot_comment(emitter) { ++ return false ++ } ++ return true ++} ++ ++// Expect DOCUMENT-END. ++func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { ++ if event.typ != yaml_DOCUMENT_END_EVENT { ++ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") ++ } ++ // [Go] Force document foot separation. ++ emitter.foot_indent = 0 ++ if !yaml_emitter_process_foot_comment(emitter) { ++ return false ++ } ++ emitter.foot_indent = -1 ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ if !event.implicit { ++ // [Go] Allocate the slice elsewhere. ++ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { ++ return false ++ } ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ if !yaml_emitter_flush(emitter) { ++ return false ++ } ++ emitter.state = yaml_EMIT_DOCUMENT_START_STATE ++ emitter.tag_directives = emitter.tag_directives[:0] ++ return true ++} ++ ++// Expect a flow item node. ++func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { ++ if first { ++ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { ++ return false ++ } ++ if !yaml_emitter_increase_indent(emitter, true, false) { ++ return false ++ } ++ emitter.flow_level++ ++ } ++ ++ if event.typ == yaml_SEQUENCE_END_EVENT { ++ if emitter.canonical && !first && !trail { ++ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { ++ return false ++ } ++ } ++ emitter.flow_level-- ++ emitter.indent = emitter.indents[len(emitter.indents)-1] ++ emitter.indents = emitter.indents[:len(emitter.indents)-1] ++ if emitter.column == 0 || emitter.canonical && !first { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { ++ return false ++ } ++ if !yaml_emitter_process_line_comment(emitter) { ++ return false ++ } ++ if !yaml_emitter_process_foot_comment(emitter) { ++ return false ++ } ++ emitter.state = emitter.states[len(emitter.states)-1] ++ emitter.states = emitter.states[:len(emitter.states)-1] ++ ++ return true ++ } ++ ++ if !first && !trail { ++ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { ++ return false ++ } ++ } ++ ++ if !yaml_emitter_process_head_comment(emitter) { ++ return false ++ } ++ if emitter.column == 0 { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ ++ if emitter.canonical || emitter.column > emitter.best_width { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { ++ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) ++ } else { ++ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) ++ } ++ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { ++ return false ++ } ++ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { ++ if 
!yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { ++ return false ++ } ++ } ++ if !yaml_emitter_process_line_comment(emitter) { ++ return false ++ } ++ if !yaml_emitter_process_foot_comment(emitter) { ++ return false ++ } ++ return true ++} ++ ++// Expect a flow key node. ++func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { ++ if first { ++ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { ++ return false ++ } ++ if !yaml_emitter_increase_indent(emitter, true, false) { ++ return false ++ } ++ emitter.flow_level++ ++ } ++ ++ if event.typ == yaml_MAPPING_END_EVENT { ++ if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { ++ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { ++ return false ++ } ++ } ++ if !yaml_emitter_process_head_comment(emitter) { ++ return false ++ } ++ emitter.flow_level-- ++ emitter.indent = emitter.indents[len(emitter.indents)-1] ++ emitter.indents = emitter.indents[:len(emitter.indents)-1] ++ if emitter.canonical && !first { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { ++ return false ++ } ++ if !yaml_emitter_process_line_comment(emitter) { ++ return false ++ } ++ if !yaml_emitter_process_foot_comment(emitter) { ++ return false ++ } ++ emitter.state = emitter.states[len(emitter.states)-1] ++ emitter.states = emitter.states[:len(emitter.states)-1] ++ return true ++ } ++ ++ if !first && !trail { ++ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { ++ return false ++ } ++ } ++ ++ if !yaml_emitter_process_head_comment(emitter) { ++ return false ++ } ++ ++ if emitter.column == 0 { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ ++ if emitter.canonical || emitter.column > emitter.best_width { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ ++ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { ++ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) ++ return yaml_emitter_emit_node(emitter, event, false, false, true, true) ++ } ++ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { ++ return false ++ } ++ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) ++ return yaml_emitter_emit_node(emitter, event, false, false, true, false) ++} ++ ++// Expect a flow value node. 
++func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { ++ if simple { ++ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { ++ return false ++ } ++ } else { ++ if emitter.canonical || emitter.column > emitter.best_width { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { ++ return false ++ } ++ } ++ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { ++ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) ++ } else { ++ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) ++ } ++ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { ++ return false ++ } ++ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { ++ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { ++ return false ++ } ++ } ++ if !yaml_emitter_process_line_comment(emitter) { ++ return false ++ } ++ if !yaml_emitter_process_foot_comment(emitter) { ++ return false ++ } ++ return true ++} ++ ++// Expect a block item node. ++func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { ++ if first { ++ if !yaml_emitter_increase_indent(emitter, false, false) { ++ return false ++ } ++ } ++ if event.typ == yaml_SEQUENCE_END_EVENT { ++ emitter.indent = emitter.indents[len(emitter.indents)-1] ++ emitter.indents = emitter.indents[:len(emitter.indents)-1] ++ emitter.state = emitter.states[len(emitter.states)-1] ++ emitter.states = emitter.states[:len(emitter.states)-1] ++ return true ++ } ++ if !yaml_emitter_process_head_comment(emitter) { ++ return false ++ } ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { ++ return false ++ } ++ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) ++ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { ++ return false ++ } ++ if !yaml_emitter_process_line_comment(emitter) { ++ return false ++ } ++ if !yaml_emitter_process_foot_comment(emitter) { ++ return false ++ } ++ return true ++} ++ ++// Expect a block key node. ++func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { ++ if first { ++ if !yaml_emitter_increase_indent(emitter, false, false) { ++ return false ++ } ++ } ++ if !yaml_emitter_process_head_comment(emitter) { ++ return false ++ } ++ if event.typ == yaml_MAPPING_END_EVENT { ++ emitter.indent = emitter.indents[len(emitter.indents)-1] ++ emitter.indents = emitter.indents[:len(emitter.indents)-1] ++ emitter.state = emitter.states[len(emitter.states)-1] ++ emitter.states = emitter.states[:len(emitter.states)-1] ++ return true ++ } ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ if len(emitter.line_comment) > 0 { ++ // [Go] A line comment was provided for the key. That's unusual as the ++ // scanner associates line comments with the value. Either way, ++ // save the line comment and render it appropriately later. 
++ emitter.key_line_comment = emitter.line_comment ++ emitter.line_comment = nil ++ } ++ if yaml_emitter_check_simple_key(emitter) { ++ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) ++ return yaml_emitter_emit_node(emitter, event, false, false, true, true) ++ } ++ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { ++ return false ++ } ++ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) ++ return yaml_emitter_emit_node(emitter, event, false, false, true, false) ++} ++ ++// Expect a block value node. ++func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { ++ if simple { ++ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { ++ return false ++ } ++ } else { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { ++ return false ++ } ++ } ++ if len(emitter.key_line_comment) > 0 { ++ // [Go] Line comments are generally associated with the value, but when there's ++ // no value on the same line as a mapping key they end up attached to the ++ // key itself. ++ if event.typ == yaml_SCALAR_EVENT { ++ if len(emitter.line_comment) == 0 { ++ // A scalar is coming and it has no line comments by itself yet, ++ // so just let it handle the line comment as usual. If it has a ++ // line comment, we can't have both so the one from the key is lost. ++ emitter.line_comment = emitter.key_line_comment ++ emitter.key_line_comment = nil ++ } ++ } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) { ++ // An indented block follows, so write the comment right now. ++ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment ++ if !yaml_emitter_process_line_comment(emitter) { ++ return false ++ } ++ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment ++ } ++ } ++ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) ++ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { ++ return false ++ } ++ if !yaml_emitter_process_line_comment(emitter) { ++ return false ++ } ++ if !yaml_emitter_process_foot_comment(emitter) { ++ return false ++ } ++ return true ++} ++ ++func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { ++ return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 ++} ++ ++// Expect a node. ++func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, ++ root bool, sequence bool, mapping bool, simple_key bool) bool { ++ ++ emitter.root_context = root ++ emitter.sequence_context = sequence ++ emitter.mapping_context = mapping ++ emitter.simple_key_context = simple_key ++ ++ switch event.typ { ++ case yaml_ALIAS_EVENT: ++ return yaml_emitter_emit_alias(emitter, event) ++ case yaml_SCALAR_EVENT: ++ return yaml_emitter_emit_scalar(emitter, event) ++ case yaml_SEQUENCE_START_EVENT: ++ return yaml_emitter_emit_sequence_start(emitter, event) ++ case yaml_MAPPING_START_EVENT: ++ return yaml_emitter_emit_mapping_start(emitter, event) ++ default: ++ return yaml_emitter_set_emitter_error(emitter, ++ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) ++ } ++} ++ ++// Expect ALIAS. 
++func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { ++ if !yaml_emitter_process_anchor(emitter) { ++ return false ++ } ++ emitter.state = emitter.states[len(emitter.states)-1] ++ emitter.states = emitter.states[:len(emitter.states)-1] ++ return true ++} ++ ++// Expect SCALAR. ++func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { ++ if !yaml_emitter_select_scalar_style(emitter, event) { ++ return false ++ } ++ if !yaml_emitter_process_anchor(emitter) { ++ return false ++ } ++ if !yaml_emitter_process_tag(emitter) { ++ return false ++ } ++ if !yaml_emitter_increase_indent(emitter, true, false) { ++ return false ++ } ++ if !yaml_emitter_process_scalar(emitter) { ++ return false ++ } ++ emitter.indent = emitter.indents[len(emitter.indents)-1] ++ emitter.indents = emitter.indents[:len(emitter.indents)-1] ++ emitter.state = emitter.states[len(emitter.states)-1] ++ emitter.states = emitter.states[:len(emitter.states)-1] ++ return true ++} ++ ++// Expect SEQUENCE-START. ++func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { ++ if !yaml_emitter_process_anchor(emitter) { ++ return false ++ } ++ if !yaml_emitter_process_tag(emitter) { ++ return false ++ } ++ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || ++ yaml_emitter_check_empty_sequence(emitter) { ++ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE ++ } else { ++ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE ++ } ++ return true ++} ++ ++// Expect MAPPING-START. ++func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { ++ if !yaml_emitter_process_anchor(emitter) { ++ return false ++ } ++ if !yaml_emitter_process_tag(emitter) { ++ return false ++ } ++ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || ++ yaml_emitter_check_empty_mapping(emitter) { ++ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE ++ } else { ++ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE ++ } ++ return true ++} ++ ++// Check if the document content is an empty scalar. ++func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { ++ return false // [Go] Huh? ++} ++ ++// Check if the next events represent an empty sequence. ++func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { ++ if len(emitter.events)-emitter.events_head < 2 { ++ return false ++ } ++ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && ++ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT ++} ++ ++// Check if the next events represent an empty mapping. ++func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { ++ if len(emitter.events)-emitter.events_head < 2 { ++ return false ++ } ++ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && ++ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT ++} ++ ++// Check if the next node can be expressed as a simple key. 
++func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { ++ length := 0 ++ switch emitter.events[emitter.events_head].typ { ++ case yaml_ALIAS_EVENT: ++ length += len(emitter.anchor_data.anchor) ++ case yaml_SCALAR_EVENT: ++ if emitter.scalar_data.multiline { ++ return false ++ } ++ length += len(emitter.anchor_data.anchor) + ++ len(emitter.tag_data.handle) + ++ len(emitter.tag_data.suffix) + ++ len(emitter.scalar_data.value) ++ case yaml_SEQUENCE_START_EVENT: ++ if !yaml_emitter_check_empty_sequence(emitter) { ++ return false ++ } ++ length += len(emitter.anchor_data.anchor) + ++ len(emitter.tag_data.handle) + ++ len(emitter.tag_data.suffix) ++ case yaml_MAPPING_START_EVENT: ++ if !yaml_emitter_check_empty_mapping(emitter) { ++ return false ++ } ++ length += len(emitter.anchor_data.anchor) + ++ len(emitter.tag_data.handle) + ++ len(emitter.tag_data.suffix) ++ default: ++ return false ++ } ++ return length <= 128 ++} ++ ++// Determine an acceptable scalar style. ++func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { ++ ++ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 ++ if no_tag && !event.implicit && !event.quoted_implicit { ++ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") ++ } ++ ++ style := event.scalar_style() ++ if style == yaml_ANY_SCALAR_STYLE { ++ style = yaml_PLAIN_SCALAR_STYLE ++ } ++ if emitter.canonical { ++ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE ++ } ++ if emitter.simple_key_context && emitter.scalar_data.multiline { ++ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE ++ } ++ ++ if style == yaml_PLAIN_SCALAR_STYLE { ++ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || ++ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { ++ style = yaml_SINGLE_QUOTED_SCALAR_STYLE ++ } ++ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { ++ style = yaml_SINGLE_QUOTED_SCALAR_STYLE ++ } ++ if no_tag && !event.implicit { ++ style = yaml_SINGLE_QUOTED_SCALAR_STYLE ++ } ++ } ++ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { ++ if !emitter.scalar_data.single_quoted_allowed { ++ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE ++ } ++ } ++ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { ++ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { ++ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE ++ } ++ } ++ ++ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { ++ emitter.tag_data.handle = []byte{'!'} ++ } ++ emitter.scalar_data.style = style ++ return true ++} ++ ++// Write an anchor. ++func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { ++ if emitter.anchor_data.anchor == nil { ++ return true ++ } ++ c := []byte{'&'} ++ if emitter.anchor_data.alias { ++ c[0] = '*' ++ } ++ if !yaml_emitter_write_indicator(emitter, c, true, false, false) { ++ return false ++ } ++ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) ++} ++ ++// Write a tag. 
++func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
++	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
++		return true
++	}
++	if len(emitter.tag_data.handle) > 0 {
++		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
++			return false
++		}
++		if len(emitter.tag_data.suffix) > 0 {
++			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
++				return false
++			}
++		}
++	} else {
++		// [Go] Allocate these slices elsewhere.
++		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
++			return false
++		}
++		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
++			return false
++		}
++		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
++			return false
++		}
++	}
++	return true
++}
++
++// Write a scalar.
++func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
++	switch emitter.scalar_data.style {
++	case yaml_PLAIN_SCALAR_STYLE:
++		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
++
++	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
++		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
++
++	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
++		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
++
++	case yaml_LITERAL_SCALAR_STYLE:
++		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
++
++	case yaml_FOLDED_SCALAR_STYLE:
++		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
++	}
++	panic("unknown scalar style")
++}
++
++// Write a head comment.
++func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
++	if len(emitter.tail_comment) > 0 {
++		if !yaml_emitter_write_indent(emitter) {
++			return false
++		}
++		if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
++			return false
++		}
++		emitter.tail_comment = emitter.tail_comment[:0]
++		emitter.foot_indent = emitter.indent
++		if emitter.foot_indent < 0 {
++			emitter.foot_indent = 0
++		}
++	}
++
++	if len(emitter.head_comment) == 0 {
++		return true
++	}
++	if !yaml_emitter_write_indent(emitter) {
++		return false
++	}
++	if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
++		return false
++	}
++	emitter.head_comment = emitter.head_comment[:0]
++	return true
++}
++
++// Write a line comment.
++func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
++	if len(emitter.line_comment) == 0 {
++		return true
++	}
++	if !emitter.whitespace {
++		if !put(emitter, ' ') {
++			return false
++		}
++	}
++	if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
++		return false
++	}
++	emitter.line_comment = emitter.line_comment[:0]
++	return true
++}
++
++// Write a foot comment.
++func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
++	if len(emitter.foot_comment) == 0 {
++		return true
++	}
++	if !yaml_emitter_write_indent(emitter) {
++		return false
++	}
++	if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
++		return false
++	}
++	emitter.foot_comment = emitter.foot_comment[:0]
++	emitter.foot_indent = emitter.indent
++	if emitter.foot_indent < 0 {
++		emitter.foot_indent = 0
++	}
++	return true
++}
++
++// Check if a %YAML directive is valid.
++func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { ++ if version_directive.major != 1 || version_directive.minor != 1 { ++ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") ++ } ++ return true ++} ++ ++// Check if a %TAG directive is valid. ++func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { ++ handle := tag_directive.handle ++ prefix := tag_directive.prefix ++ if len(handle) == 0 { ++ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") ++ } ++ if handle[0] != '!' { ++ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") ++ } ++ if handle[len(handle)-1] != '!' { ++ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") ++ } ++ for i := 1; i < len(handle)-1; i += width(handle[i]) { ++ if !is_alpha(handle, i) { ++ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") ++ } ++ } ++ if len(prefix) == 0 { ++ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") ++ } ++ return true ++} ++ ++// Check if an anchor is valid. ++func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { ++ if len(anchor) == 0 { ++ problem := "anchor value must not be empty" ++ if alias { ++ problem = "alias value must not be empty" ++ } ++ return yaml_emitter_set_emitter_error(emitter, problem) ++ } ++ for i := 0; i < len(anchor); i += width(anchor[i]) { ++ if !is_alpha(anchor, i) { ++ problem := "anchor value must contain alphanumerical characters only" ++ if alias { ++ problem = "alias value must contain alphanumerical characters only" ++ } ++ return yaml_emitter_set_emitter_error(emitter, problem) ++ } ++ } ++ emitter.anchor_data.anchor = anchor ++ emitter.anchor_data.alias = alias ++ return true ++} ++ ++// Check if a tag is valid. ++func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { ++ if len(tag) == 0 { ++ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") ++ } ++ for i := 0; i < len(emitter.tag_directives); i++ { ++ tag_directive := &emitter.tag_directives[i] ++ if bytes.HasPrefix(tag, tag_directive.prefix) { ++ emitter.tag_data.handle = tag_directive.handle ++ emitter.tag_data.suffix = tag[len(tag_directive.prefix):] ++ return true ++ } ++ } ++ emitter.tag_data.suffix = tag ++ return true ++} ++ ++// Check if a scalar is valid. ++func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { ++ var ( ++ block_indicators = false ++ flow_indicators = false ++ line_breaks = false ++ special_characters = false ++ tab_characters = false ++ ++ leading_space = false ++ leading_break = false ++ trailing_space = false ++ trailing_break = false ++ break_space = false ++ space_break = false ++ ++ preceded_by_whitespace = false ++ followed_by_whitespace = false ++ previous_space = false ++ previous_break = false ++ ) ++ ++ emitter.scalar_data.value = value ++ ++ if len(value) == 0 { ++ emitter.scalar_data.multiline = false ++ emitter.scalar_data.flow_plain_allowed = false ++ emitter.scalar_data.block_plain_allowed = true ++ emitter.scalar_data.single_quoted_allowed = true ++ emitter.scalar_data.block_allowed = false ++ return true ++ } ++ ++ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { ++ block_indicators = true ++ flow_indicators = true ++ } ++ ++ preceded_by_whitespace = true ++ for i, w := 0, 0; i < len(value); i += w { ++ w = width(value[i]) ++ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) ++ ++ if i == 0 { ++ switch value[i] { ++ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': ++ flow_indicators = true ++ block_indicators = true ++ case '?', ':': ++ flow_indicators = true ++ if followed_by_whitespace { ++ block_indicators = true ++ } ++ case '-': ++ if followed_by_whitespace { ++ flow_indicators = true ++ block_indicators = true ++ } ++ } ++ } else { ++ switch value[i] { ++ case ',', '?', '[', ']', '{', '}': ++ flow_indicators = true ++ case ':': ++ flow_indicators = true ++ if followed_by_whitespace { ++ block_indicators = true ++ } ++ case '#': ++ if preceded_by_whitespace { ++ flow_indicators = true ++ block_indicators = true ++ } ++ } ++ } ++ ++ if value[i] == '\t' { ++ tab_characters = true ++ } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { ++ special_characters = true ++ } ++ if is_space(value, i) { ++ if i == 0 { ++ leading_space = true ++ } ++ if i+width(value[i]) == len(value) { ++ trailing_space = true ++ } ++ if previous_break { ++ break_space = true ++ } ++ previous_space = true ++ previous_break = false ++ } else if is_break(value, i) { ++ line_breaks = true ++ if i == 0 { ++ leading_break = true ++ } ++ if i+width(value[i]) == len(value) { ++ trailing_break = true ++ } ++ if previous_space { ++ space_break = true ++ } ++ previous_space = false ++ previous_break = true ++ } else { ++ previous_space = false ++ previous_break = false ++ } ++ ++ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. ++ preceded_by_whitespace = is_blankz(value, i) ++ } ++ ++ emitter.scalar_data.multiline = line_breaks ++ emitter.scalar_data.flow_plain_allowed = true ++ emitter.scalar_data.block_plain_allowed = true ++ emitter.scalar_data.single_quoted_allowed = true ++ emitter.scalar_data.block_allowed = true ++ ++ if leading_space || leading_break || trailing_space || trailing_break { ++ emitter.scalar_data.flow_plain_allowed = false ++ emitter.scalar_data.block_plain_allowed = false ++ } ++ if trailing_space { ++ emitter.scalar_data.block_allowed = false ++ } ++ if break_space { ++ emitter.scalar_data.flow_plain_allowed = false ++ emitter.scalar_data.block_plain_allowed = false ++ emitter.scalar_data.single_quoted_allowed = false ++ } ++ if space_break || tab_characters || special_characters { ++ emitter.scalar_data.flow_plain_allowed = false ++ emitter.scalar_data.block_plain_allowed = false ++ emitter.scalar_data.single_quoted_allowed = false ++ } ++ if space_break || special_characters { ++ emitter.scalar_data.block_allowed = false ++ } ++ if line_breaks { ++ emitter.scalar_data.flow_plain_allowed = false ++ emitter.scalar_data.block_plain_allowed = false ++ } ++ if flow_indicators { ++ emitter.scalar_data.flow_plain_allowed = false ++ } ++ if block_indicators { ++ emitter.scalar_data.block_plain_allowed = false ++ } ++ return true ++} ++ ++// Check if the event data is valid. 
++func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { ++ ++ emitter.anchor_data.anchor = nil ++ emitter.tag_data.handle = nil ++ emitter.tag_data.suffix = nil ++ emitter.scalar_data.value = nil ++ ++ if len(event.head_comment) > 0 { ++ emitter.head_comment = event.head_comment ++ } ++ if len(event.line_comment) > 0 { ++ emitter.line_comment = event.line_comment ++ } ++ if len(event.foot_comment) > 0 { ++ emitter.foot_comment = event.foot_comment ++ } ++ if len(event.tail_comment) > 0 { ++ emitter.tail_comment = event.tail_comment ++ } ++ ++ switch event.typ { ++ case yaml_ALIAS_EVENT: ++ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { ++ return false ++ } ++ ++ case yaml_SCALAR_EVENT: ++ if len(event.anchor) > 0 { ++ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { ++ return false ++ } ++ } ++ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { ++ if !yaml_emitter_analyze_tag(emitter, event.tag) { ++ return false ++ } ++ } ++ if !yaml_emitter_analyze_scalar(emitter, event.value) { ++ return false ++ } ++ ++ case yaml_SEQUENCE_START_EVENT: ++ if len(event.anchor) > 0 { ++ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { ++ return false ++ } ++ } ++ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { ++ if !yaml_emitter_analyze_tag(emitter, event.tag) { ++ return false ++ } ++ } ++ ++ case yaml_MAPPING_START_EVENT: ++ if len(event.anchor) > 0 { ++ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { ++ return false ++ } ++ } ++ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { ++ if !yaml_emitter_analyze_tag(emitter, event.tag) { ++ return false ++ } ++ } ++ } ++ return true ++} ++ ++// Write the BOM character. 
++func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { ++ if !flush(emitter) { ++ return false ++ } ++ pos := emitter.buffer_pos ++ emitter.buffer[pos+0] = '\xEF' ++ emitter.buffer[pos+1] = '\xBB' ++ emitter.buffer[pos+2] = '\xBF' ++ emitter.buffer_pos += 3 ++ return true ++} ++ ++func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { ++ indent := emitter.indent ++ if indent < 0 { ++ indent = 0 ++ } ++ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { ++ if !put_break(emitter) { ++ return false ++ } ++ } ++ if emitter.foot_indent == indent { ++ if !put_break(emitter) { ++ return false ++ } ++ } ++ for emitter.column < indent { ++ if !put(emitter, ' ') { ++ return false ++ } ++ } ++ emitter.whitespace = true ++ //emitter.indention = true ++ emitter.space_above = false ++ emitter.foot_indent = -1 ++ return true ++} ++ ++func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { ++ if need_whitespace && !emitter.whitespace { ++ if !put(emitter, ' ') { ++ return false ++ } ++ } ++ if !write_all(emitter, indicator) { ++ return false ++ } ++ emitter.whitespace = is_whitespace ++ emitter.indention = (emitter.indention && is_indention) ++ emitter.open_ended = false ++ return true ++} ++ ++func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { ++ if !write_all(emitter, value) { ++ return false ++ } ++ emitter.whitespace = false ++ emitter.indention = false ++ return true ++} ++ ++func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { ++ if !emitter.whitespace { ++ if !put(emitter, ' ') { ++ return false ++ } ++ } ++ if !write_all(emitter, value) { ++ return false ++ } ++ emitter.whitespace = false ++ emitter.indention = false ++ return true ++} ++ ++func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { ++ if need_whitespace && !emitter.whitespace { ++ if !put(emitter, ' ') { ++ return false ++ } ++ } ++ for i := 0; i < len(value); { ++ var must_write bool ++ switch value[i] { ++ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': ++ must_write = true ++ default: ++ must_write = is_alpha(value, i) ++ } ++ if must_write { ++ if !write(emitter, value, &i) { ++ return false ++ } ++ } else { ++ w := width(value[i]) ++ for k := 0; k < w; k++ { ++ octet := value[i] ++ i++ ++ if !put(emitter, '%') { ++ return false ++ } ++ ++ c := octet >> 4 ++ if c < 10 { ++ c += '0' ++ } else { ++ c += 'A' - 10 ++ } ++ if !put(emitter, c) { ++ return false ++ } ++ ++ c = octet & 0x0f ++ if c < 10 { ++ c += '0' ++ } else { ++ c += 'A' - 10 ++ } ++ if !put(emitter, c) { ++ return false ++ } ++ } ++ } ++ } ++ emitter.whitespace = false ++ emitter.indention = false ++ return true ++} ++ ++func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { ++ if len(value) > 0 && !emitter.whitespace { ++ if !put(emitter, ' ') { ++ return false ++ } ++ } ++ ++ spaces := false ++ breaks := false ++ for i := 0; i < len(value); { ++ if is_space(value, i) { ++ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ i += width(value[i]) ++ } else { ++ if !write(emitter, value, &i) { ++ return false ++ } ++ } ++ spaces = true ++ } else if is_break(value, i) { ++ if !breaks && value[i] == '\n' { ++ if 
!put_break(emitter) { ++ return false ++ } ++ } ++ if !write_break(emitter, value, &i) { ++ return false ++ } ++ //emitter.indention = true ++ breaks = true ++ } else { ++ if breaks { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ if !write(emitter, value, &i) { ++ return false ++ } ++ emitter.indention = false ++ spaces = false ++ breaks = false ++ } ++ } ++ ++ if len(value) > 0 { ++ emitter.whitespace = false ++ } ++ emitter.indention = false ++ if emitter.root_context { ++ emitter.open_ended = true ++ } ++ ++ return true ++} ++ ++func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { ++ ++ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { ++ return false ++ } ++ ++ spaces := false ++ breaks := false ++ for i := 0; i < len(value); { ++ if is_space(value, i) { ++ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ i += width(value[i]) ++ } else { ++ if !write(emitter, value, &i) { ++ return false ++ } ++ } ++ spaces = true ++ } else if is_break(value, i) { ++ if !breaks && value[i] == '\n' { ++ if !put_break(emitter) { ++ return false ++ } ++ } ++ if !write_break(emitter, value, &i) { ++ return false ++ } ++ //emitter.indention = true ++ breaks = true ++ } else { ++ if breaks { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ if value[i] == '\'' { ++ if !put(emitter, '\'') { ++ return false ++ } ++ } ++ if !write(emitter, value, &i) { ++ return false ++ } ++ emitter.indention = false ++ spaces = false ++ breaks = false ++ } ++ } ++ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { ++ return false ++ } ++ emitter.whitespace = false ++ emitter.indention = false ++ return true ++} ++ ++func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { ++ spaces := false ++ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { ++ return false ++ } ++ ++ for i := 0; i < len(value); { ++ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || ++ is_bom(value, i) || is_break(value, i) || ++ value[i] == '"' || value[i] == '\\' { ++ ++ octet := value[i] ++ ++ var w int ++ var v rune ++ switch { ++ case octet&0x80 == 0x00: ++ w, v = 1, rune(octet&0x7F) ++ case octet&0xE0 == 0xC0: ++ w, v = 2, rune(octet&0x1F) ++ case octet&0xF0 == 0xE0: ++ w, v = 3, rune(octet&0x0F) ++ case octet&0xF8 == 0xF0: ++ w, v = 4, rune(octet&0x07) ++ } ++ for k := 1; k < w; k++ { ++ octet = value[i+k] ++ v = (v << 6) + (rune(octet) & 0x3F) ++ } ++ i += w ++ ++ if !put(emitter, '\\') { ++ return false ++ } ++ ++ var ok bool ++ switch v { ++ case 0x00: ++ ok = put(emitter, '0') ++ case 0x07: ++ ok = put(emitter, 'a') ++ case 0x08: ++ ok = put(emitter, 'b') ++ case 0x09: ++ ok = put(emitter, 't') ++ case 0x0A: ++ ok = put(emitter, 'n') ++ case 0x0b: ++ ok = put(emitter, 'v') ++ case 0x0c: ++ ok = put(emitter, 'f') ++ case 0x0d: ++ ok = put(emitter, 'r') ++ case 0x1b: ++ ok = put(emitter, 'e') ++ case 0x22: ++ ok = put(emitter, '"') ++ case 0x5c: ++ ok = put(emitter, '\\') ++ case 0x85: ++ ok = put(emitter, 'N') ++ case 0xA0: ++ ok = put(emitter, '_') ++ case 0x2028: ++ ok = put(emitter, 'L') ++ case 0x2029: ++ ok = put(emitter, 'P') ++ default: ++ if v <= 0xFF { ++ ok = put(emitter, 'x') ++ w = 2 ++ } else if v <= 0xFFFF { ++ ok = put(emitter, 'u') 
++ w = 4 ++ } else { ++ ok = put(emitter, 'U') ++ w = 8 ++ } ++ for k := (w - 1) * 4; ok && k >= 0; k -= 4 { ++ digit := byte((v >> uint(k)) & 0x0F) ++ if digit < 10 { ++ ok = put(emitter, digit+'0') ++ } else { ++ ok = put(emitter, digit+'A'-10) ++ } ++ } ++ } ++ if !ok { ++ return false ++ } ++ spaces = false ++ } else if is_space(value, i) { ++ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ if is_space(value, i+1) { ++ if !put(emitter, '\\') { ++ return false ++ } ++ } ++ i += width(value[i]) ++ } else if !write(emitter, value, &i) { ++ return false ++ } ++ spaces = true ++ } else { ++ if !write(emitter, value, &i) { ++ return false ++ } ++ spaces = false ++ } ++ } ++ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { ++ return false ++ } ++ emitter.whitespace = false ++ emitter.indention = false ++ return true ++} ++ ++func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { ++ if is_space(value, 0) || is_break(value, 0) { ++ indent_hint := []byte{'0' + byte(emitter.best_indent)} ++ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { ++ return false ++ } ++ } ++ ++ emitter.open_ended = false ++ ++ var chomp_hint [1]byte ++ if len(value) == 0 { ++ chomp_hint[0] = '-' ++ } else { ++ i := len(value) - 1 ++ for value[i]&0xC0 == 0x80 { ++ i-- ++ } ++ if !is_break(value, i) { ++ chomp_hint[0] = '-' ++ } else if i == 0 { ++ chomp_hint[0] = '+' ++ emitter.open_ended = true ++ } else { ++ i-- ++ for value[i]&0xC0 == 0x80 { ++ i-- ++ } ++ if is_break(value, i) { ++ chomp_hint[0] = '+' ++ emitter.open_ended = true ++ } ++ } ++ } ++ if chomp_hint[0] != 0 { ++ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { ++ return false ++ } ++ } ++ return true ++} ++ ++func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { ++ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { ++ return false ++ } ++ if !yaml_emitter_write_block_scalar_hints(emitter, value) { ++ return false ++ } ++ if !yaml_emitter_process_line_comment(emitter) { ++ return false ++ } ++ //emitter.indention = true ++ emitter.whitespace = true ++ breaks := true ++ for i := 0; i < len(value); { ++ if is_break(value, i) { ++ if !write_break(emitter, value, &i) { ++ return false ++ } ++ //emitter.indention = true ++ breaks = true ++ } else { ++ if breaks { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ } ++ if !write(emitter, value, &i) { ++ return false ++ } ++ emitter.indention = false ++ breaks = false ++ } ++ } ++ ++ return true ++} ++ ++func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { ++ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { ++ return false ++ } ++ if !yaml_emitter_write_block_scalar_hints(emitter, value) { ++ return false ++ } ++ if !yaml_emitter_process_line_comment(emitter) { ++ return false ++ } ++ ++ //emitter.indention = true ++ emitter.whitespace = true ++ ++ breaks := true ++ leading_spaces := true ++ for i := 0; i < len(value); { ++ if is_break(value, i) { ++ if !breaks && !leading_spaces && value[i] == '\n' { ++ k := 0 ++ for is_break(value, k) { ++ k += width(value[k]) ++ } ++ if !is_blankz(value, k) { ++ if !put_break(emitter) { ++ return false ++ } ++ } ++ } ++ if !write_break(emitter, value, &i) { ++ return false ++ } ++ //emitter.indention = true ++ 
breaks = true ++ } else { ++ if breaks { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ leading_spaces = is_blank(value, i) ++ } ++ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { ++ if !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ i += width(value[i]) ++ } else { ++ if !write(emitter, value, &i) { ++ return false ++ } ++ } ++ emitter.indention = false ++ breaks = false ++ } ++ } ++ return true ++} ++ ++func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { ++ breaks := false ++ pound := false ++ for i := 0; i < len(comment); { ++ if is_break(comment, i) { ++ if !write_break(emitter, comment, &i) { ++ return false ++ } ++ //emitter.indention = true ++ breaks = true ++ pound = false ++ } else { ++ if breaks && !yaml_emitter_write_indent(emitter) { ++ return false ++ } ++ if !pound { ++ if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { ++ return false ++ } ++ pound = true ++ } ++ if !write(emitter, comment, &i) { ++ return false ++ } ++ emitter.indention = false ++ breaks = false ++ } ++ } ++ if !breaks && !put_break(emitter) { ++ return false ++ } ++ ++ emitter.whitespace = true ++ //emitter.indention = true ++ return true ++} +diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go +new file mode 100644 +index 000000000000..de9e72a3e638 +--- /dev/null ++++ b/vendor/gopkg.in/yaml.v3/encode.go +@@ -0,0 +1,577 @@ ++// ++// Copyright (c) 2011-2019 Canonical Ltd ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package yaml ++ ++import ( ++ "encoding" ++ "fmt" ++ "io" ++ "reflect" ++ "regexp" ++ "sort" ++ "strconv" ++ "strings" ++ "time" ++ "unicode/utf8" ++) ++ ++type encoder struct { ++ emitter yaml_emitter_t ++ event yaml_event_t ++ out []byte ++ flow bool ++ indent int ++ doneInit bool ++} ++ ++func newEncoder() *encoder { ++ e := &encoder{} ++ yaml_emitter_initialize(&e.emitter) ++ yaml_emitter_set_output_string(&e.emitter, &e.out) ++ yaml_emitter_set_unicode(&e.emitter, true) ++ return e ++} ++ ++func newEncoderWithWriter(w io.Writer) *encoder { ++ e := &encoder{} ++ yaml_emitter_initialize(&e.emitter) ++ yaml_emitter_set_output_writer(&e.emitter, w) ++ yaml_emitter_set_unicode(&e.emitter, true) ++ return e ++} ++ ++func (e *encoder) init() { ++ if e.doneInit { ++ return ++ } ++ if e.indent == 0 { ++ e.indent = 4 ++ } ++ e.emitter.best_indent = e.indent ++ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) ++ e.emit() ++ e.doneInit = true ++} ++ ++func (e *encoder) finish() { ++ e.emitter.open_ended = false ++ yaml_stream_end_event_initialize(&e.event) ++ e.emit() ++} ++ ++func (e *encoder) destroy() { ++ yaml_emitter_delete(&e.emitter) ++} ++ ++func (e *encoder) emit() { ++ // This will internally delete the e.event value. 
++ e.must(yaml_emitter_emit(&e.emitter, &e.event)) ++} ++ ++func (e *encoder) must(ok bool) { ++ if !ok { ++ msg := e.emitter.problem ++ if msg == "" { ++ msg = "unknown problem generating YAML content" ++ } ++ failf("%s", msg) ++ } ++} ++ ++func (e *encoder) marshalDoc(tag string, in reflect.Value) { ++ e.init() ++ var node *Node ++ if in.IsValid() { ++ node, _ = in.Interface().(*Node) ++ } ++ if node != nil && node.Kind == DocumentNode { ++ e.nodev(in) ++ } else { ++ yaml_document_start_event_initialize(&e.event, nil, nil, true) ++ e.emit() ++ e.marshal(tag, in) ++ yaml_document_end_event_initialize(&e.event, true) ++ e.emit() ++ } ++} ++ ++func (e *encoder) marshal(tag string, in reflect.Value) { ++ tag = shortTag(tag) ++ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { ++ e.nilv() ++ return ++ } ++ iface := in.Interface() ++ switch value := iface.(type) { ++ case *Node: ++ e.nodev(in) ++ return ++ case Node: ++ if !in.CanAddr() { ++ var n = reflect.New(in.Type()).Elem() ++ n.Set(in) ++ in = n ++ } ++ e.nodev(in.Addr()) ++ return ++ case time.Time: ++ e.timev(tag, in) ++ return ++ case *time.Time: ++ e.timev(tag, in.Elem()) ++ return ++ case time.Duration: ++ e.stringv(tag, reflect.ValueOf(value.String())) ++ return ++ case Marshaler: ++ v, err := value.MarshalYAML() ++ if err != nil { ++ fail(err) ++ } ++ if v == nil { ++ e.nilv() ++ return ++ } ++ e.marshal(tag, reflect.ValueOf(v)) ++ return ++ case encoding.TextMarshaler: ++ text, err := value.MarshalText() ++ if err != nil { ++ fail(err) ++ } ++ in = reflect.ValueOf(string(text)) ++ case nil: ++ e.nilv() ++ return ++ } ++ switch in.Kind() { ++ case reflect.Interface: ++ e.marshal(tag, in.Elem()) ++ case reflect.Map: ++ e.mapv(tag, in) ++ case reflect.Ptr: ++ e.marshal(tag, in.Elem()) ++ case reflect.Struct: ++ e.structv(tag, in) ++ case reflect.Slice, reflect.Array: ++ e.slicev(tag, in) ++ case reflect.String: ++ e.stringv(tag, in) ++ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: ++ e.intv(tag, in) ++ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: ++ e.uintv(tag, in) ++ case reflect.Float32, reflect.Float64: ++ e.floatv(tag, in) ++ case reflect.Bool: ++ e.boolv(tag, in) ++ default: ++ panic("cannot marshal type: " + in.Type().String()) ++ } ++} ++ ++func (e *encoder) mapv(tag string, in reflect.Value) { ++ e.mappingv(tag, func() { ++ keys := keyList(in.MapKeys()) ++ sort.Sort(keys) ++ for _, k := range keys { ++ e.marshal("", k) ++ e.marshal("", in.MapIndex(k)) ++ } ++ }) ++} ++ ++func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { ++ for _, num := range index { ++ for { ++ if v.Kind() == reflect.Ptr { ++ if v.IsNil() { ++ return reflect.Value{} ++ } ++ v = v.Elem() ++ continue ++ } ++ break ++ } ++ v = v.Field(num) ++ } ++ return v ++} ++ ++func (e *encoder) structv(tag string, in reflect.Value) { ++ sinfo, err := getStructInfo(in.Type()) ++ if err != nil { ++ panic(err) ++ } ++ e.mappingv(tag, func() { ++ for _, info := range sinfo.FieldsList { ++ var value reflect.Value ++ if info.Inline == nil { ++ value = in.Field(info.Num) ++ } else { ++ value = e.fieldByIndex(in, info.Inline) ++ if !value.IsValid() { ++ continue ++ } ++ } ++ if info.OmitEmpty && isZero(value) { ++ continue ++ } ++ e.marshal("", reflect.ValueOf(info.Key)) ++ e.flow = info.Flow ++ e.marshal("", value) ++ } ++ if sinfo.InlineMap >= 0 { ++ m := in.Field(sinfo.InlineMap) ++ if m.Len() > 0 { ++ e.flow = false ++ keys := 
keyList(m.MapKeys())
++ sort.Sort(keys)
++ for _, k := range keys {
++ if _, found := sinfo.FieldsMap[k.String()]; found {
++ panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
++ }
++ e.marshal("", k)
++ e.flow = false
++ e.marshal("", m.MapIndex(k))
++ }
++ }
++ }
++ })
++}
++
++func (e *encoder) mappingv(tag string, f func()) {
++ implicit := tag == ""
++ style := yaml_BLOCK_MAPPING_STYLE
++ if e.flow {
++ e.flow = false
++ style = yaml_FLOW_MAPPING_STYLE
++ }
++ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
++ e.emit()
++ f()
++ yaml_mapping_end_event_initialize(&e.event)
++ e.emit()
++}
++
++func (e *encoder) slicev(tag string, in reflect.Value) {
++ implicit := tag == ""
++ style := yaml_BLOCK_SEQUENCE_STYLE
++ if e.flow {
++ e.flow = false
++ style = yaml_FLOW_SEQUENCE_STYLE
++ }
++ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
++ e.emit()
++ n := in.Len()
++ for i := 0; i < n; i++ {
++ e.marshal("", in.Index(i))
++ }
++ e.must(yaml_sequence_end_event_initialize(&e.event))
++ e.emit()
++}
++
++// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
++//
++// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
++// in YAML 1.2 and by this package, but these should be marshalled quoted for
++// the time being for compatibility with other parsers.
++func isBase60Float(s string) (result bool) {
++ // Fast path.
++ if s == "" {
++ return false
++ }
++ c := s[0]
++ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
++ return false
++ }
++ // Do the full match.
++ return base60float.MatchString(s)
++}
++
++// From http://yaml.org/type/float.html, except the regular expression there
++// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
++var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
++
++// isOldBool returns whether s is in bool notation as defined in YAML 1.1.
++//
++// We continue to force strings that YAML 1.1 would interpret as booleans to be
++// rendered as quoted strings so that the marshalled output is valid for YAML 1.1
++// parsing.
++func isOldBool(s string) (result bool) {
++ switch s {
++ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
++ "n", "N", "no", "No", "NO", "off", "Off", "OFF":
++ return true
++ default:
++ return false
++ }
++}
++
++func (e *encoder) stringv(tag string, in reflect.Value) {
++ var style yaml_scalar_style_t
++ s := in.String()
++ canUsePlain := true
++ switch {
++ case !utf8.ValidString(s):
++ if tag == binaryTag {
++ failf("explicitly tagged !!binary data must be base64-encoded")
++ }
++ if tag != "" {
++ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
++ }
++ // It can't be encoded directly as YAML so use a binary tag
++ // and encode it as base64.
++ tag = binaryTag
++ s = encodeBase64(s)
++ case tag == "":
++ // Check to see if it would resolve to a specific
++ // tag when encoded unquoted. If it doesn't,
++ // there's no need to quote it.
++ rtag, _ := resolve("", s)
++ canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
++ }
++ // Note: it's possible for user code to emit invalid YAML
++ // if they explicitly specify a tag and a string containing
++ // text that's incompatible with that tag.
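++ //
++ // For example (illustrative): with tag == "", s = "hello" resolves to
++ // !!str and stays plain, while s = "123" resolves to !!int, s = "1:30"
++ // matches the YAML 1.1 base 60 form, and s = "on" matches the YAML 1.1
++ // bool form, so all three get canUsePlain == false and are emitted
++ // double-quoted to preserve their string type.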
++ switch {
++ case strings.Contains(s, "\n"):
++ if e.flow {
++ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
++ } else {
++ style = yaml_LITERAL_SCALAR_STYLE
++ }
++ case canUsePlain:
++ style = yaml_PLAIN_SCALAR_STYLE
++ default:
++ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
++ }
++ e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
++}
++
++func (e *encoder) boolv(tag string, in reflect.Value) {
++ var s string
++ if in.Bool() {
++ s = "true"
++ } else {
++ s = "false"
++ }
++ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
++}
++
++func (e *encoder) intv(tag string, in reflect.Value) {
++ s := strconv.FormatInt(in.Int(), 10)
++ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
++}
++
++func (e *encoder) uintv(tag string, in reflect.Value) {
++ s := strconv.FormatUint(in.Uint(), 10)
++ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
++}
++
++func (e *encoder) timev(tag string, in reflect.Value) {
++ t := in.Interface().(time.Time)
++ s := t.Format(time.RFC3339Nano)
++ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
++}
++
++func (e *encoder) floatv(tag string, in reflect.Value) {
++ // Issue #352: When formatting, use the precision of the underlying value
++ precision := 64
++ if in.Kind() == reflect.Float32 {
++ precision = 32
++ }
++
++ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
++ switch s {
++ case "+Inf":
++ s = ".inf"
++ case "-Inf":
++ s = "-.inf"
++ case "NaN":
++ s = ".nan"
++ }
++ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
++}
++
++func (e *encoder) nilv() {
++ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
++}
++
++func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
++ // TODO Kill this function. Replace all initialize calls by their underlying Go literals.
++ implicit := tag == ""
++ if !implicit {
++ tag = longTag(tag)
++ }
++ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
++ e.event.head_comment = head
++ e.event.line_comment = line
++ e.event.foot_comment = foot
++ e.event.tail_comment = tail
++ e.emit()
++}
++
++func (e *encoder) nodev(in reflect.Value) {
++ e.node(in.Interface().(*Node), "")
++}
++
++func (e *encoder) node(node *Node, tail string) {
++ // Zero nodes behave as nil.
++ if node.Kind == 0 && node.IsZero() {
++ e.nilv()
++ return
++ }
++
++ // If the tag was not explicitly requested, and dropping it won't change the
++ // implicit tag of the value, don't include it in the presentation.
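++ //
++ // For example (illustrative): a ScalarNode with Tag "!!str" and Value
++ // "foo" already resolves to !!str, so the tag is dropped and the value
++ // is rendered plain; with Value "123" the implicit tag would be !!int,
++ // so the tag is still dropped but forceQuoting is set below and the
++ // value is rendered as "123" rather than with an explicit !!str tag.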
++ var tag = node.Tag
++ var stag = shortTag(tag)
++ var forceQuoting bool
++ if tag != "" && node.Style&TaggedStyle == 0 {
++ if node.Kind == ScalarNode {
++ if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
++ tag = ""
++ } else {
++ rtag, _ := resolve("", node.Value)
++ if rtag == stag {
++ tag = ""
++ } else if stag == strTag {
++ tag = ""
++ forceQuoting = true
++ }
++ }
++ } else {
++ var rtag string
++ switch node.Kind {
++ case MappingNode:
++ rtag = mapTag
++ case SequenceNode:
++ rtag = seqTag
++ }
++ if rtag == stag {
++ tag = ""
++ }
++ }
++ }
++
++ switch node.Kind {
++ case DocumentNode:
++ yaml_document_start_event_initialize(&e.event, nil, nil, true)
++ e.event.head_comment = []byte(node.HeadComment)
++ e.emit()
++ for _, node := range node.Content {
++ e.node(node, "")
++ }
++ yaml_document_end_event_initialize(&e.event, true)
++ e.event.foot_comment = []byte(node.FootComment)
++ e.emit()
++
++ case SequenceNode:
++ style := yaml_BLOCK_SEQUENCE_STYLE
++ if node.Style&FlowStyle != 0 {
++ style = yaml_FLOW_SEQUENCE_STYLE
++ }
++ e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
++ e.event.head_comment = []byte(node.HeadComment)
++ e.emit()
++ for _, node := range node.Content {
++ e.node(node, "")
++ }
++ e.must(yaml_sequence_end_event_initialize(&e.event))
++ e.event.line_comment = []byte(node.LineComment)
++ e.event.foot_comment = []byte(node.FootComment)
++ e.emit()
++
++ case MappingNode:
++ style := yaml_BLOCK_MAPPING_STYLE
++ if node.Style&FlowStyle != 0 {
++ style = yaml_FLOW_MAPPING_STYLE
++ }
++ yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
++ e.event.tail_comment = []byte(tail)
++ e.event.head_comment = []byte(node.HeadComment)
++ e.emit()
++
++ // The tail logic below moves the foot comment of prior keys to the following key,
++ // since the value for each key may be a nested structure and the foot needs to be
++ // processed only once the entirety of the value has been streamed. The last tail
++ // is processed with the mapping end event.
++ var tail string
++ for i := 0; i+1 < len(node.Content); i += 2 {
++ k := node.Content[i]
++ foot := k.FootComment
++ if foot != "" {
++ kopy := *k
++ kopy.FootComment = ""
++ k = &kopy
++ }
++ e.node(k, tail)
++ tail = foot
++
++ v := node.Content[i+1]
++ e.node(v, "")
++ }
++
++ yaml_mapping_end_event_initialize(&e.event)
++ e.event.tail_comment = []byte(tail)
++ e.event.line_comment = []byte(node.LineComment)
++ e.event.foot_comment = []byte(node.FootComment)
++ e.emit()
++
++ case AliasNode:
++ yaml_alias_event_initialize(&e.event, []byte(node.Value))
++ e.event.head_comment = []byte(node.HeadComment)
++ e.event.line_comment = []byte(node.LineComment)
++ e.event.foot_comment = []byte(node.FootComment)
++ e.emit()
++
++ case ScalarNode:
++ value := node.Value
++ if !utf8.ValidString(value) {
++ if stag == binaryTag {
++ failf("explicitly tagged !!binary data must be base64-encoded")
++ }
++ if stag != "" {
++ failf("cannot marshal invalid UTF-8 data as %s", stag)
++ }
++ // It can't be encoded directly as YAML so use a binary tag
++ // and encode it as base64.
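++ // For example (illustrative), the one-byte value "\xff" is not
++ // valid UTF-8 and would be emitted as the !!binary scalar "/w==".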
++ tag = binaryTag ++ value = encodeBase64(value) ++ } ++ ++ style := yaml_PLAIN_SCALAR_STYLE ++ switch { ++ case node.Style&DoubleQuotedStyle != 0: ++ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE ++ case node.Style&SingleQuotedStyle != 0: ++ style = yaml_SINGLE_QUOTED_SCALAR_STYLE ++ case node.Style&LiteralStyle != 0: ++ style = yaml_LITERAL_SCALAR_STYLE ++ case node.Style&FoldedStyle != 0: ++ style = yaml_FOLDED_SCALAR_STYLE ++ case strings.Contains(value, "\n"): ++ style = yaml_LITERAL_SCALAR_STYLE ++ case forceQuoting: ++ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE ++ } ++ ++ e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) ++ default: ++ failf("cannot encode node with unknown kind %d", node.Kind) ++ } ++} +diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go +new file mode 100644 +index 000000000000..268558a0d632 +--- /dev/null ++++ b/vendor/gopkg.in/yaml.v3/parserc.go +@@ -0,0 +1,1258 @@ ++// ++// Copyright (c) 2011-2019 Canonical Ltd ++// Copyright (c) 2006-2010 Kirill Simonov ++// ++// Permission is hereby granted, free of charge, to any person obtaining a copy of ++// this software and associated documentation files (the "Software"), to deal in ++// the Software without restriction, including without limitation the rights to ++// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies ++// of the Software, and to permit persons to whom the Software is furnished to do ++// so, subject to the following conditions: ++// ++// The above copyright notice and this permission notice shall be included in all ++// copies or substantial portions of the Software. ++// ++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++// SOFTWARE. ++ ++package yaml ++ ++import ( ++ "bytes" ++) ++ ++// The parser implements the following grammar: ++// ++// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END ++// implicit_document ::= block_node DOCUMENT-END* ++// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* ++// block_node_or_indentless_sequence ::= ++// ALIAS ++// | properties (block_content | indentless_block_sequence)? ++// | block_content ++// | indentless_block_sequence ++// block_node ::= ALIAS ++// | properties block_content? ++// | block_content ++// flow_node ::= ALIAS ++// | properties flow_content? ++// | flow_content ++// properties ::= TAG ANCHOR? | ANCHOR TAG? ++// block_content ::= block_collection | flow_collection | SCALAR ++// flow_content ::= flow_collection | SCALAR ++// block_collection ::= block_sequence | block_mapping ++// flow_collection ::= flow_sequence | flow_mapping ++// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END ++// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ ++// block_mapping ::= BLOCK-MAPPING_START ++// ((KEY block_node_or_indentless_sequence?)? ++// (VALUE block_node_or_indentless_sequence?)?)* ++// BLOCK-END ++// flow_sequence ::= FLOW-SEQUENCE-START ++// (flow_sequence_entry FLOW-ENTRY)* ++// flow_sequence_entry? 
++// FLOW-SEQUENCE-END ++// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? ++// flow_mapping ::= FLOW-MAPPING-START ++// (flow_mapping_entry FLOW-ENTRY)* ++// flow_mapping_entry? ++// FLOW-MAPPING-END ++// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? ++ ++// Peek the next token in the token queue. ++func peek_token(parser *yaml_parser_t) *yaml_token_t { ++ if parser.token_available || yaml_parser_fetch_more_tokens(parser) { ++ token := &parser.tokens[parser.tokens_head] ++ yaml_parser_unfold_comments(parser, token) ++ return token ++ } ++ return nil ++} ++ ++// yaml_parser_unfold_comments walks through the comments queue and joins all ++// comments behind the position of the provided token into the respective ++// top-level comment slices in the parser. ++func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { ++ for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { ++ comment := &parser.comments[parser.comments_head] ++ if len(comment.head) > 0 { ++ if token.typ == yaml_BLOCK_END_TOKEN { ++ // No heads on ends, so keep comment.head for a follow up token. ++ break ++ } ++ if len(parser.head_comment) > 0 { ++ parser.head_comment = append(parser.head_comment, '\n') ++ } ++ parser.head_comment = append(parser.head_comment, comment.head...) ++ } ++ if len(comment.foot) > 0 { ++ if len(parser.foot_comment) > 0 { ++ parser.foot_comment = append(parser.foot_comment, '\n') ++ } ++ parser.foot_comment = append(parser.foot_comment, comment.foot...) ++ } ++ if len(comment.line) > 0 { ++ if len(parser.line_comment) > 0 { ++ parser.line_comment = append(parser.line_comment, '\n') ++ } ++ parser.line_comment = append(parser.line_comment, comment.line...) ++ } ++ *comment = yaml_comment_t{} ++ parser.comments_head++ ++ } ++} ++ ++// Remove the next token from the queue (must be called after peek_token). ++func skip_token(parser *yaml_parser_t) { ++ parser.token_available = false ++ parser.tokens_parsed++ ++ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN ++ parser.tokens_head++ ++} ++ ++// Get the next event. ++func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { ++ // Erase the event object. ++ *event = yaml_event_t{} ++ ++ // No events after the end of the stream or error. ++ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { ++ return true ++ } ++ ++ // Generate the next event. ++ return yaml_parser_state_machine(parser, event) ++} ++ ++// Set parser error. ++func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { ++ parser.error = yaml_PARSER_ERROR ++ parser.problem = problem ++ parser.problem_mark = problem_mark ++ return false ++} ++ ++func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { ++ parser.error = yaml_PARSER_ERROR ++ parser.context = context ++ parser.context_mark = context_mark ++ parser.problem = problem ++ parser.problem_mark = problem_mark ++ return false ++} ++ ++// State dispatcher. 
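++//
++// As a rough illustration, a minimal stream such as `a: 1` is driven
++// through the states below as: STREAM-START -> implicit DOCUMENT-START ->
++// BLOCK_NODE (which emits the mapping start) -> BLOCK_MAPPING_KEY/VALUE
++// for the single pair -> BLOCK-END (mapping end) -> DOCUMENT-END ->
++// DOCUMENT-START again, which finally emits STREAM-END.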
++func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { ++ //trace("yaml_parser_state_machine", "state:", parser.state.String()) ++ ++ switch parser.state { ++ case yaml_PARSE_STREAM_START_STATE: ++ return yaml_parser_parse_stream_start(parser, event) ++ ++ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: ++ return yaml_parser_parse_document_start(parser, event, true) ++ ++ case yaml_PARSE_DOCUMENT_START_STATE: ++ return yaml_parser_parse_document_start(parser, event, false) ++ ++ case yaml_PARSE_DOCUMENT_CONTENT_STATE: ++ return yaml_parser_parse_document_content(parser, event) ++ ++ case yaml_PARSE_DOCUMENT_END_STATE: ++ return yaml_parser_parse_document_end(parser, event) ++ ++ case yaml_PARSE_BLOCK_NODE_STATE: ++ return yaml_parser_parse_node(parser, event, true, false) ++ ++ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: ++ return yaml_parser_parse_node(parser, event, true, true) ++ ++ case yaml_PARSE_FLOW_NODE_STATE: ++ return yaml_parser_parse_node(parser, event, false, false) ++ ++ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: ++ return yaml_parser_parse_block_sequence_entry(parser, event, true) ++ ++ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: ++ return yaml_parser_parse_block_sequence_entry(parser, event, false) ++ ++ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: ++ return yaml_parser_parse_indentless_sequence_entry(parser, event) ++ ++ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: ++ return yaml_parser_parse_block_mapping_key(parser, event, true) ++ ++ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: ++ return yaml_parser_parse_block_mapping_key(parser, event, false) ++ ++ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: ++ return yaml_parser_parse_block_mapping_value(parser, event) ++ ++ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: ++ return yaml_parser_parse_flow_sequence_entry(parser, event, true) ++ ++ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: ++ return yaml_parser_parse_flow_sequence_entry(parser, event, false) ++ ++ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: ++ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) ++ ++ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: ++ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) ++ ++ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: ++ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) ++ ++ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: ++ return yaml_parser_parse_flow_mapping_key(parser, event, true) ++ ++ case yaml_PARSE_FLOW_MAPPING_KEY_STATE: ++ return yaml_parser_parse_flow_mapping_key(parser, event, false) ++ ++ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: ++ return yaml_parser_parse_flow_mapping_value(parser, event, false) ++ ++ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: ++ return yaml_parser_parse_flow_mapping_value(parser, event, true) ++ ++ default: ++ panic("invalid parser state") ++ } ++} ++ ++// Parse the production: ++// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
++// ************
++func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
++ token := peek_token(parser)
++ if token == nil {
++ return false
++ }
++ if token.typ != yaml_STREAM_START_TOKEN {
++ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
++ }
++ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
++ *event = yaml_event_t{
++ typ: yaml_STREAM_START_EVENT,
++ start_mark: token.start_mark,
++ end_mark: token.end_mark,
++ encoding: token.encoding,
++ }
++ skip_token(parser)
++ return true
++}
++
++// Parse the productions:
++// implicit_document ::= block_node DOCUMENT-END*
++// *
++// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
++// *************************
++func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
++
++ token := peek_token(parser)
++ if token == nil {
++ return false
++ }
++
++ // Parse extra document end indicators.
++ if !implicit {
++ for token.typ == yaml_DOCUMENT_END_TOKEN {
++ skip_token(parser)
++ token = peek_token(parser)
++ if token == nil {
++ return false
++ }
++ }
++ }
++
++ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
++ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
++ token.typ != yaml_DOCUMENT_START_TOKEN &&
++ token.typ != yaml_STREAM_END_TOKEN {
++ // Parse an implicit document.
++ if !yaml_parser_process_directives(parser, nil, nil) {
++ return false
++ }
++ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
++ parser.state = yaml_PARSE_BLOCK_NODE_STATE
++
++ var head_comment []byte
++ if len(parser.head_comment) > 0 {
++ // [Go] Scan the header comment backwards, and if an empty line is found, break
++ // the header so the part before the last empty line goes into the
++ // document header, while the bottom of it goes into a follow up event.
++ for i := len(parser.head_comment) - 1; i > 0; i-- {
++ if parser.head_comment[i] == '\n' {
++ if i == len(parser.head_comment)-1 {
++ head_comment = parser.head_comment[:i]
++ parser.head_comment = parser.head_comment[i+1:]
++ break
++ } else if parser.head_comment[i-1] == '\n' {
++ head_comment = parser.head_comment[:i-1]
++ parser.head_comment = parser.head_comment[i+1:]
++ break
++ }
++ }
++ }
++ }
++
++ *event = yaml_event_t{
++ typ: yaml_DOCUMENT_START_EVENT,
++ start_mark: token.start_mark,
++ end_mark: token.end_mark,
++
++ head_comment: head_comment,
++ }
++
++ } else if token.typ != yaml_STREAM_END_TOKEN {
++ // Parse an explicit document.
++ var version_directive *yaml_version_directive_t
++ var tag_directives []yaml_tag_directive_t
++ start_mark := token.start_mark
++ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
++ return false
++ }
++ token = peek_token(parser)
++ if token == nil {
++ return false
++ }
++ if token.typ != yaml_DOCUMENT_START_TOKEN {
++ yaml_parser_set_parser_error(parser,
++ "did not find expected <document start>", token.start_mark)
++ return false
++ }
++ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
++ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
++ end_mark := token.end_mark
++
++ *event = yaml_event_t{
++ typ: yaml_DOCUMENT_START_EVENT,
++ start_mark: start_mark,
++ end_mark: end_mark,
++ version_directive: version_directive,
++ tag_directives: tag_directives,
++ implicit: false,
++ }
++ skip_token(parser)
++
++ } else {
++ // Parse the stream end.
++ parser.state = yaml_PARSE_END_STATE ++ *event = yaml_event_t{ ++ typ: yaml_STREAM_END_EVENT, ++ start_mark: token.start_mark, ++ end_mark: token.end_mark, ++ } ++ skip_token(parser) ++ } ++ ++ return true ++} ++ ++// Parse the productions: ++// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* ++// *********** ++// ++func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ ++ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || ++ token.typ == yaml_TAG_DIRECTIVE_TOKEN || ++ token.typ == yaml_DOCUMENT_START_TOKEN || ++ token.typ == yaml_DOCUMENT_END_TOKEN || ++ token.typ == yaml_STREAM_END_TOKEN { ++ parser.state = parser.states[len(parser.states)-1] ++ parser.states = parser.states[:len(parser.states)-1] ++ return yaml_parser_process_empty_scalar(parser, event, ++ token.start_mark) ++ } ++ return yaml_parser_parse_node(parser, event, true, false) ++} ++ ++// Parse the productions: ++// implicit_document ::= block_node DOCUMENT-END* ++// ************* ++// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* ++// ++func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ ++ start_mark := token.start_mark ++ end_mark := token.start_mark ++ ++ implicit := true ++ if token.typ == yaml_DOCUMENT_END_TOKEN { ++ end_mark = token.end_mark ++ skip_token(parser) ++ implicit = false ++ } ++ ++ parser.tag_directives = parser.tag_directives[:0] ++ ++ parser.state = yaml_PARSE_DOCUMENT_START_STATE ++ *event = yaml_event_t{ ++ typ: yaml_DOCUMENT_END_EVENT, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ implicit: implicit, ++ } ++ yaml_parser_set_event_comments(parser, event) ++ if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { ++ event.foot_comment = event.head_comment ++ event.head_comment = nil ++ } ++ return true ++} ++ ++func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { ++ event.head_comment = parser.head_comment ++ event.line_comment = parser.line_comment ++ event.foot_comment = parser.foot_comment ++ parser.head_comment = nil ++ parser.line_comment = nil ++ parser.foot_comment = nil ++ parser.tail_comment = nil ++ parser.stem_comment = nil ++} ++ ++// Parse the productions: ++// block_node_or_indentless_sequence ::= ++// ALIAS ++// ***** ++// | properties (block_content | indentless_block_sequence)? ++// ********** * ++// | block_content | indentless_block_sequence ++// * ++// block_node ::= ALIAS ++// ***** ++// | properties block_content? ++// ********** * ++// | block_content ++// * ++// flow_node ::= ALIAS ++// ***** ++// | properties flow_content? ++// ********** * ++// | flow_content ++// * ++// properties ::= TAG ANCHOR? | ANCHOR TAG? 
++// ************************* ++// block_content ::= block_collection | flow_collection | SCALAR ++// ****** ++// flow_content ::= flow_collection | SCALAR ++// ****** ++func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { ++ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() ++ ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ ++ if token.typ == yaml_ALIAS_TOKEN { ++ parser.state = parser.states[len(parser.states)-1] ++ parser.states = parser.states[:len(parser.states)-1] ++ *event = yaml_event_t{ ++ typ: yaml_ALIAS_EVENT, ++ start_mark: token.start_mark, ++ end_mark: token.end_mark, ++ anchor: token.value, ++ } ++ yaml_parser_set_event_comments(parser, event) ++ skip_token(parser) ++ return true ++ } ++ ++ start_mark := token.start_mark ++ end_mark := token.start_mark ++ ++ var tag_token bool ++ var tag_handle, tag_suffix, anchor []byte ++ var tag_mark yaml_mark_t ++ if token.typ == yaml_ANCHOR_TOKEN { ++ anchor = token.value ++ start_mark = token.start_mark ++ end_mark = token.end_mark ++ skip_token(parser) ++ token = peek_token(parser) ++ if token == nil { ++ return false ++ } ++ if token.typ == yaml_TAG_TOKEN { ++ tag_token = true ++ tag_handle = token.value ++ tag_suffix = token.suffix ++ tag_mark = token.start_mark ++ end_mark = token.end_mark ++ skip_token(parser) ++ token = peek_token(parser) ++ if token == nil { ++ return false ++ } ++ } ++ } else if token.typ == yaml_TAG_TOKEN { ++ tag_token = true ++ tag_handle = token.value ++ tag_suffix = token.suffix ++ start_mark = token.start_mark ++ tag_mark = token.start_mark ++ end_mark = token.end_mark ++ skip_token(parser) ++ token = peek_token(parser) ++ if token == nil { ++ return false ++ } ++ if token.typ == yaml_ANCHOR_TOKEN { ++ anchor = token.value ++ end_mark = token.end_mark ++ skip_token(parser) ++ token = peek_token(parser) ++ if token == nil { ++ return false ++ } ++ } ++ } ++ ++ var tag []byte ++ if tag_token { ++ if len(tag_handle) == 0 { ++ tag = tag_suffix ++ tag_suffix = nil ++ } else { ++ for i := range parser.tag_directives { ++ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { ++ tag = append([]byte(nil), parser.tag_directives[i].prefix...) ++ tag = append(tag, tag_suffix...) 
++ break ++ } ++ } ++ if len(tag) == 0 { ++ yaml_parser_set_parser_error_context(parser, ++ "while parsing a node", start_mark, ++ "found undefined tag handle", tag_mark) ++ return false ++ } ++ } ++ } ++ ++ implicit := len(tag) == 0 ++ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { ++ end_mark = token.end_mark ++ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE ++ *event = yaml_event_t{ ++ typ: yaml_SEQUENCE_START_EVENT, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ anchor: anchor, ++ tag: tag, ++ implicit: implicit, ++ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), ++ } ++ return true ++ } ++ if token.typ == yaml_SCALAR_TOKEN { ++ var plain_implicit, quoted_implicit bool ++ end_mark = token.end_mark ++ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { ++ plain_implicit = true ++ } else if len(tag) == 0 { ++ quoted_implicit = true ++ } ++ parser.state = parser.states[len(parser.states)-1] ++ parser.states = parser.states[:len(parser.states)-1] ++ ++ *event = yaml_event_t{ ++ typ: yaml_SCALAR_EVENT, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ anchor: anchor, ++ tag: tag, ++ value: token.value, ++ implicit: plain_implicit, ++ quoted_implicit: quoted_implicit, ++ style: yaml_style_t(token.style), ++ } ++ yaml_parser_set_event_comments(parser, event) ++ skip_token(parser) ++ return true ++ } ++ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { ++ // [Go] Some of the events below can be merged as they differ only on style. ++ end_mark = token.end_mark ++ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE ++ *event = yaml_event_t{ ++ typ: yaml_SEQUENCE_START_EVENT, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ anchor: anchor, ++ tag: tag, ++ implicit: implicit, ++ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), ++ } ++ yaml_parser_set_event_comments(parser, event) ++ return true ++ } ++ if token.typ == yaml_FLOW_MAPPING_START_TOKEN { ++ end_mark = token.end_mark ++ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE ++ *event = yaml_event_t{ ++ typ: yaml_MAPPING_START_EVENT, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ anchor: anchor, ++ tag: tag, ++ implicit: implicit, ++ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), ++ } ++ yaml_parser_set_event_comments(parser, event) ++ return true ++ } ++ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { ++ end_mark = token.end_mark ++ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE ++ *event = yaml_event_t{ ++ typ: yaml_SEQUENCE_START_EVENT, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ anchor: anchor, ++ tag: tag, ++ implicit: implicit, ++ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), ++ } ++ if parser.stem_comment != nil { ++ event.head_comment = parser.stem_comment ++ parser.stem_comment = nil ++ } ++ return true ++ } ++ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { ++ end_mark = token.end_mark ++ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE ++ *event = yaml_event_t{ ++ typ: yaml_MAPPING_START_EVENT, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ anchor: anchor, ++ tag: tag, ++ implicit: implicit, ++ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), ++ } ++ if parser.stem_comment != nil { ++ event.head_comment = parser.stem_comment ++ parser.stem_comment = nil ++ } ++ return true ++ } ++ if len(anchor) > 0 || len(tag) > 0 { ++ parser.state = parser.states[len(parser.states)-1] ++ parser.states = parser.states[:len(parser.states)-1] ++ ++ *event = yaml_event_t{ ++ typ: 
yaml_SCALAR_EVENT, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ anchor: anchor, ++ tag: tag, ++ implicit: implicit, ++ quoted_implicit: false, ++ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), ++ } ++ return true ++ } ++ ++ context := "while parsing a flow node" ++ if block { ++ context = "while parsing a block node" ++ } ++ yaml_parser_set_parser_error_context(parser, context, start_mark, ++ "did not find expected node content", token.start_mark) ++ return false ++} ++ ++// Parse the productions: ++// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END ++// ******************** *********** * ********* ++// ++func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { ++ if first { ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ parser.marks = append(parser.marks, token.start_mark) ++ skip_token(parser) ++ } ++ ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ ++ if token.typ == yaml_BLOCK_ENTRY_TOKEN { ++ mark := token.end_mark ++ prior_head_len := len(parser.head_comment) ++ skip_token(parser) ++ yaml_parser_split_stem_comment(parser, prior_head_len) ++ token = peek_token(parser) ++ if token == nil { ++ return false ++ } ++ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { ++ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) ++ return yaml_parser_parse_node(parser, event, true, false) ++ } else { ++ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE ++ return yaml_parser_process_empty_scalar(parser, event, mark) ++ } ++ } ++ if token.typ == yaml_BLOCK_END_TOKEN { ++ parser.state = parser.states[len(parser.states)-1] ++ parser.states = parser.states[:len(parser.states)-1] ++ parser.marks = parser.marks[:len(parser.marks)-1] ++ ++ *event = yaml_event_t{ ++ typ: yaml_SEQUENCE_END_EVENT, ++ start_mark: token.start_mark, ++ end_mark: token.end_mark, ++ } ++ ++ skip_token(parser) ++ return true ++ } ++ ++ context_mark := parser.marks[len(parser.marks)-1] ++ parser.marks = parser.marks[:len(parser.marks)-1] ++ return yaml_parser_set_parser_error_context(parser, ++ "while parsing a block collection", context_mark, ++ "did not find expected '-' indicator", token.start_mark) ++} ++ ++// Parse the productions: ++// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ ++// *********** * ++func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ ++ if token.typ == yaml_BLOCK_ENTRY_TOKEN { ++ mark := token.end_mark ++ prior_head_len := len(parser.head_comment) ++ skip_token(parser) ++ yaml_parser_split_stem_comment(parser, prior_head_len) ++ token = peek_token(parser) ++ if token == nil { ++ return false ++ } ++ if token.typ != yaml_BLOCK_ENTRY_TOKEN && ++ token.typ != yaml_KEY_TOKEN && ++ token.typ != yaml_VALUE_TOKEN && ++ token.typ != yaml_BLOCK_END_TOKEN { ++ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) ++ return yaml_parser_parse_node(parser, event, true, false) ++ } ++ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE ++ return yaml_parser_process_empty_scalar(parser, event, mark) ++ } ++ parser.state = parser.states[len(parser.states)-1] ++ parser.states = parser.states[:len(parser.states)-1] ++ ++ *event = yaml_event_t{ ++ typ: yaml_SEQUENCE_END_EVENT, ++ start_mark: token.start_mark, ++ end_mark: token.start_mark, // [Go] Shouldn't this be 
token.end_mark? ++ } ++ return true ++} ++ ++// Split stem comment from head comment. ++// ++// When a sequence or map is found under a sequence entry, the former head comment ++// is assigned to the underlying sequence or map as a whole, not the individual ++// sequence or map entry as would be expected otherwise. To handle this case the ++// previous head comment is moved aside as the stem comment. ++func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { ++ if stem_len == 0 { ++ return ++ } ++ ++ token := peek_token(parser) ++ if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { ++ return ++ } ++ ++ parser.stem_comment = parser.head_comment[:stem_len] ++ if len(parser.head_comment) == stem_len { ++ parser.head_comment = nil ++ } else { ++ // Copy suffix to prevent very strange bugs if someone ever appends ++ // further bytes to the prefix in the stem_comment slice above. ++ parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) ++ } ++} ++ ++// Parse the productions: ++// block_mapping ::= BLOCK-MAPPING_START ++// ******************* ++// ((KEY block_node_or_indentless_sequence?)? ++// *** * ++// (VALUE block_node_or_indentless_sequence?)?)* ++// ++// BLOCK-END ++// ********* ++// ++func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { ++ if first { ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ parser.marks = append(parser.marks, token.start_mark) ++ skip_token(parser) ++ } ++ ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ ++ // [Go] A tail comment was left from the prior mapping value processed. Emit an event ++ // as it needs to be processed with that value and not the following key. ++ if len(parser.tail_comment) > 0 { ++ *event = yaml_event_t{ ++ typ: yaml_TAIL_COMMENT_EVENT, ++ start_mark: token.start_mark, ++ end_mark: token.end_mark, ++ foot_comment: parser.tail_comment, ++ } ++ parser.tail_comment = nil ++ return true ++ } ++ ++ if token.typ == yaml_KEY_TOKEN { ++ mark := token.end_mark ++ skip_token(parser) ++ token = peek_token(parser) ++ if token == nil { ++ return false ++ } ++ if token.typ != yaml_KEY_TOKEN && ++ token.typ != yaml_VALUE_TOKEN && ++ token.typ != yaml_BLOCK_END_TOKEN { ++ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) ++ return yaml_parser_parse_node(parser, event, true, true) ++ } else { ++ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE ++ return yaml_parser_process_empty_scalar(parser, event, mark) ++ } ++ } else if token.typ == yaml_BLOCK_END_TOKEN { ++ parser.state = parser.states[len(parser.states)-1] ++ parser.states = parser.states[:len(parser.states)-1] ++ parser.marks = parser.marks[:len(parser.marks)-1] ++ *event = yaml_event_t{ ++ typ: yaml_MAPPING_END_EVENT, ++ start_mark: token.start_mark, ++ end_mark: token.end_mark, ++ } ++ yaml_parser_set_event_comments(parser, event) ++ skip_token(parser) ++ return true ++ } ++ ++ context_mark := parser.marks[len(parser.marks)-1] ++ parser.marks = parser.marks[:len(parser.marks)-1] ++ return yaml_parser_set_parser_error_context(parser, ++ "while parsing a block mapping", context_mark, ++ "did not find expected key", token.start_mark) ++} ++ ++// Parse the productions: ++// block_mapping ::= BLOCK-MAPPING_START ++// ++// ((KEY block_node_or_indentless_sequence?)? 
++// ++// (VALUE block_node_or_indentless_sequence?)?)* ++// ***** * ++// BLOCK-END ++// ++// ++func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ if token.typ == yaml_VALUE_TOKEN { ++ mark := token.end_mark ++ skip_token(parser) ++ token = peek_token(parser) ++ if token == nil { ++ return false ++ } ++ if token.typ != yaml_KEY_TOKEN && ++ token.typ != yaml_VALUE_TOKEN && ++ token.typ != yaml_BLOCK_END_TOKEN { ++ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) ++ return yaml_parser_parse_node(parser, event, true, true) ++ } ++ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE ++ return yaml_parser_process_empty_scalar(parser, event, mark) ++ } ++ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE ++ return yaml_parser_process_empty_scalar(parser, event, token.start_mark) ++} ++ ++// Parse the productions: ++// flow_sequence ::= FLOW-SEQUENCE-START ++// ******************* ++// (flow_sequence_entry FLOW-ENTRY)* ++// * ********** ++// flow_sequence_entry? ++// * ++// FLOW-SEQUENCE-END ++// ***************** ++// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? ++// * ++// ++func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { ++ if first { ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ parser.marks = append(parser.marks, token.start_mark) ++ skip_token(parser) ++ } ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { ++ if !first { ++ if token.typ == yaml_FLOW_ENTRY_TOKEN { ++ skip_token(parser) ++ token = peek_token(parser) ++ if token == nil { ++ return false ++ } ++ } else { ++ context_mark := parser.marks[len(parser.marks)-1] ++ parser.marks = parser.marks[:len(parser.marks)-1] ++ return yaml_parser_set_parser_error_context(parser, ++ "while parsing a flow sequence", context_mark, ++ "did not find expected ',' or ']'", token.start_mark) ++ } ++ } ++ ++ if token.typ == yaml_KEY_TOKEN { ++ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE ++ *event = yaml_event_t{ ++ typ: yaml_MAPPING_START_EVENT, ++ start_mark: token.start_mark, ++ end_mark: token.end_mark, ++ implicit: true, ++ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), ++ } ++ skip_token(parser) ++ return true ++ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { ++ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) ++ return yaml_parser_parse_node(parser, event, false, false) ++ } ++ } ++ ++ parser.state = parser.states[len(parser.states)-1] ++ parser.states = parser.states[:len(parser.states)-1] ++ parser.marks = parser.marks[:len(parser.marks)-1] ++ ++ *event = yaml_event_t{ ++ typ: yaml_SEQUENCE_END_EVENT, ++ start_mark: token.start_mark, ++ end_mark: token.end_mark, ++ } ++ yaml_parser_set_event_comments(parser, event) ++ ++ skip_token(parser) ++ return true ++} ++ ++// ++// Parse the productions: ++// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
++// *** * ++// ++func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ if token.typ != yaml_VALUE_TOKEN && ++ token.typ != yaml_FLOW_ENTRY_TOKEN && ++ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { ++ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) ++ return yaml_parser_parse_node(parser, event, false, false) ++ } ++ mark := token.end_mark ++ skip_token(parser) ++ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE ++ return yaml_parser_process_empty_scalar(parser, event, mark) ++} ++ ++// Parse the productions: ++// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? ++// ***** * ++// ++func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ if token.typ == yaml_VALUE_TOKEN { ++ skip_token(parser) ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { ++ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) ++ return yaml_parser_parse_node(parser, event, false, false) ++ } ++ } ++ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE ++ return yaml_parser_process_empty_scalar(parser, event, token.start_mark) ++} ++ ++// Parse the productions: ++// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? ++// * ++// ++func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE ++ *event = yaml_event_t{ ++ typ: yaml_MAPPING_END_EVENT, ++ start_mark: token.start_mark, ++ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? ++ } ++ return true ++} ++ ++// Parse the productions: ++// flow_mapping ::= FLOW-MAPPING-START ++// ****************** ++// (flow_mapping_entry FLOW-ENTRY)* ++// * ********** ++// flow_mapping_entry? ++// ****************** ++// FLOW-MAPPING-END ++// **************** ++// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
++// * *** *
++//
++func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
++ if first {
++ token := peek_token(parser)
++ if token == nil {
++ return false
++ }
++ parser.marks = append(parser.marks, token.start_mark)
++ skip_token(parser)
++ }
++
++ token := peek_token(parser)
++ if token == nil {
++ return false
++ }
++
++ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
++ if !first {
++ if token.typ == yaml_FLOW_ENTRY_TOKEN {
++ skip_token(parser)
++ token = peek_token(parser)
++ if token == nil {
++ return false
++ }
++ } else {
++ context_mark := parser.marks[len(parser.marks)-1]
++ parser.marks = parser.marks[:len(parser.marks)-1]
++ return yaml_parser_set_parser_error_context(parser,
++ "while parsing a flow mapping", context_mark,
++ "did not find expected ',' or '}'", token.start_mark)
++ }
++ }
++
++ if token.typ == yaml_KEY_TOKEN {
++ skip_token(parser)
++ token = peek_token(parser)
++ if token == nil {
++ return false
++ }
++ if token.typ != yaml_VALUE_TOKEN &&
++ token.typ != yaml_FLOW_ENTRY_TOKEN &&
++ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
++ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
++ return yaml_parser_parse_node(parser, event, false, false)
++ } else {
++ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
++ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
++ }
++ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
++ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
++ return yaml_parser_parse_node(parser, event, false, false)
++ }
++ }
++
++ parser.state = parser.states[len(parser.states)-1]
++ parser.states = parser.states[:len(parser.states)-1]
++ parser.marks = parser.marks[:len(parser.marks)-1]
++ *event = yaml_event_t{
++ typ: yaml_MAPPING_END_EVENT,
++ start_mark: token.start_mark,
++ end_mark: token.end_mark,
++ }
++ yaml_parser_set_event_comments(parser, event)
++ skip_token(parser)
++ return true
++}
++
++// Parse the productions:
++// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
++// * ***** *
++//
++func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
++ token := peek_token(parser)
++ if token == nil {
++ return false
++ }
++ if empty {
++ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
++ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
++ }
++ if token.typ == yaml_VALUE_TOKEN {
++ skip_token(parser)
++ token = peek_token(parser)
++ if token == nil {
++ return false
++ }
++ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
++ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
++ return yaml_parser_parse_node(parser, event, false, false)
++ }
++ }
++ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
++ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
++}
++
++// Generate an empty scalar event.
++func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
++ *event = yaml_event_t{
++ typ: yaml_SCALAR_EVENT,
++ start_mark: mark,
++ end_mark: mark,
++ value: nil, // Empty
++ implicit: true,
++ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
++ }
++ return true
++}
++
++var default_tag_directives = []yaml_tag_directive_t{
++ {[]byte("!"), []byte("!")},
++ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
++}
++
++// Parse directives.
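++//
++// For example (illustrative), a document beginning with
++//
++//	%YAML 1.1
++//	%TAG !e! tag:example.com,2000:app/
++//	---
++//
++// produces version_directive = {major: 1, minor: 1} and one tag directive
++// {handle: "!e!", prefix: "tag:example.com,2000:app/"}; the
++// default_tag_directives above are then appended unless the document has
++// already overridden them.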
++func yaml_parser_process_directives(parser *yaml_parser_t, ++ version_directive_ref **yaml_version_directive_t, ++ tag_directives_ref *[]yaml_tag_directive_t) bool { ++ ++ var version_directive *yaml_version_directive_t ++ var tag_directives []yaml_tag_directive_t ++ ++ token := peek_token(parser) ++ if token == nil { ++ return false ++ } ++ ++ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { ++ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { ++ if version_directive != nil { ++ yaml_parser_set_parser_error(parser, ++ "found duplicate %YAML directive", token.start_mark) ++ return false ++ } ++ if token.major != 1 || token.minor != 1 { ++ yaml_parser_set_parser_error(parser, ++ "found incompatible YAML document", token.start_mark) ++ return false ++ } ++ version_directive = &yaml_version_directive_t{ ++ major: token.major, ++ minor: token.minor, ++ } ++ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { ++ value := yaml_tag_directive_t{ ++ handle: token.value, ++ prefix: token.prefix, ++ } ++ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { ++ return false ++ } ++ tag_directives = append(tag_directives, value) ++ } ++ ++ skip_token(parser) ++ token = peek_token(parser) ++ if token == nil { ++ return false ++ } ++ } ++ ++ for i := range default_tag_directives { ++ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { ++ return false ++ } ++ } ++ ++ if version_directive_ref != nil { ++ *version_directive_ref = version_directive ++ } ++ if tag_directives_ref != nil { ++ *tag_directives_ref = tag_directives ++ } ++ return true ++} ++ ++// Append a tag directive to the directives stack. ++func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { ++ for i := range parser.tag_directives { ++ if bytes.Equal(value.handle, parser.tag_directives[i].handle) { ++ if allow_duplicates { ++ return true ++ } ++ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) ++ } ++ } ++ ++ // [Go] I suspect the copy is unnecessary. This was likely done ++ // because there was no way to track ownership of the data. ++ value_copy := yaml_tag_directive_t{ ++ handle: make([]byte, len(value.handle)), ++ prefix: make([]byte, len(value.prefix)), ++ } ++ copy(value_copy.handle, value.handle) ++ copy(value_copy.prefix, value.prefix) ++ parser.tag_directives = append(parser.tag_directives, value_copy) ++ return true ++} +diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go +new file mode 100644 +index 000000000000..b7de0a89c462 +--- /dev/null ++++ b/vendor/gopkg.in/yaml.v3/readerc.go +@@ -0,0 +1,434 @@ ++// ++// Copyright (c) 2011-2019 Canonical Ltd ++// Copyright (c) 2006-2010 Kirill Simonov ++// ++// Permission is hereby granted, free of charge, to any person obtaining a copy of ++// this software and associated documentation files (the "Software"), to deal in ++// the Software without restriction, including without limitation the rights to ++// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies ++// of the Software, and to permit persons to whom the Software is furnished to do ++// so, subject to the following conditions: ++// ++// The above copyright notice and this permission notice shall be included in all ++// copies or substantial portions of the Software. 
++//
++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++// SOFTWARE.
++
++package yaml
++
++import (
++ "io"
++)
++
++// Set the reader error and return false.
++func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
++ parser.error = yaml_READER_ERROR
++ parser.problem = problem
++ parser.problem_offset = offset
++ parser.problem_value = value
++ return false
++}
++
++// Byte order marks.
++const (
++ bom_UTF8 = "\xef\xbb\xbf"
++ bom_UTF16LE = "\xff\xfe"
++ bom_UTF16BE = "\xfe\xff"
++)
++
++// Determine the input stream encoding by checking the BOM symbol. If no BOM is
++// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
++func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
++ // Ensure that we had enough bytes in the raw buffer.
++ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
++ if !yaml_parser_update_raw_buffer(parser) {
++ return false
++ }
++ }
++
++ // Determine the encoding.
++ buf := parser.raw_buffer
++ pos := parser.raw_buffer_pos
++ avail := len(buf) - pos
++ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
++ parser.encoding = yaml_UTF16LE_ENCODING
++ parser.raw_buffer_pos += 2
++ parser.offset += 2
++ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
++ parser.encoding = yaml_UTF16BE_ENCODING
++ parser.raw_buffer_pos += 2
++ parser.offset += 2
++ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
++ parser.encoding = yaml_UTF8_ENCODING
++ parser.raw_buffer_pos += 3
++ parser.offset += 3
++ } else {
++ parser.encoding = yaml_UTF8_ENCODING
++ }
++ return true
++}
++
++// Update the raw buffer.
++func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
++ size_read := 0
++
++ // Return if the raw buffer is full.
++ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
++ return true
++ }
++
++ // Return on EOF.
++ if parser.eof {
++ return true
++ }
++
++ // Move the remaining bytes in the raw buffer to the beginning.
++ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
++ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
++ }
++ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
++ parser.raw_buffer_pos = 0
++
++ // Call the read handler to fill the buffer.
++ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
++ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
++ if err == io.EOF {
++ parser.eof = true
++ } else if err != nil {
++ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
++ }
++ return true
++}
++
++// Ensure that the buffer contains at least `length` characters.
++// Return true on success, false on failure.
++//
++// The length is supposed to be significantly less than the buffer size.
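++//
++// Note that `length` is measured in characters, not bytes: the decode loop
++// below appends one UTF-8 encoded character to parser.buffer and bumps
++// parser.unread once per character, whatever its byte width.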
++func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
++ if parser.read_handler == nil {
++ panic("read handler must be set")
++ }
++
++ // [Go] This function was changed to guarantee the requested length at EOF.
++ // The fact we need to do this is pretty awful, but the description above
++ // implies that this is the case, and there are tests that depend on it.
++
++ // If the EOF flag is set and the raw buffer is empty, do nothing.
++ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
++ // [Go] ACTUALLY! Read the documentation of this function above.
++ // This is just broken. To return true, we need to have the
++ // given length in the buffer. Not doing that means every single
++ // check that calls this function to make sure the buffer has a
++ // given length is either panicking (in Go) or accessing invalid
++ // memory (as it would in C).
++ //return true
++ }
++
++ // Return if the buffer contains enough characters.
++ if parser.unread >= length {
++ return true
++ }
++
++ // Determine the input encoding if it is not known yet.
++ if parser.encoding == yaml_ANY_ENCODING {
++ if !yaml_parser_determine_encoding(parser) {
++ return false
++ }
++ }
++
++ // Move the unread characters to the beginning of the buffer.
++ buffer_len := len(parser.buffer)
++ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
++ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
++ buffer_len -= parser.buffer_pos
++ parser.buffer_pos = 0
++ } else if parser.buffer_pos == buffer_len {
++ buffer_len = 0
++ parser.buffer_pos = 0
++ }
++
++ // Open the whole buffer for writing, and cut it before returning.
++ parser.buffer = parser.buffer[:cap(parser.buffer)]
++
++ // Fill the buffer until it has enough characters.
++ first := true
++ for parser.unread < length {
++
++ // Fill the raw buffer if necessary.
++ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
++ if !yaml_parser_update_raw_buffer(parser) {
++ parser.buffer = parser.buffer[:buffer_len]
++ return false
++ }
++ }
++ first = false
++
++ // Decode the raw buffer.
++ inner:
++ for parser.raw_buffer_pos != len(parser.raw_buffer) {
++ var value rune
++ var width int
++
++ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
++
++ // Decode the next character.
++ switch parser.encoding {
++ case yaml_UTF8_ENCODING:
++ // Decode a UTF-8 character. Check RFC 3629
++ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
++ //
++ // The following table (taken from the RFC) is used for
++ // decoding.
++ //
++ // Char. number range | UTF-8 octet sequence
++ // (hexadecimal) | (binary)
++ // --------------------+------------------------------------
++ // 0000 0000-0000 007F | 0xxxxxxx
++ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
++ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
++ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
++ //
++ // Additionally, the characters in the range 0xD800-0xDFFF
++ // are prohibited as they are reserved for use with UTF-16
++ // surrogate pairs.
++
++ // Determine the length of the UTF-8 sequence.
++ octet := parser.raw_buffer[parser.raw_buffer_pos]
++ switch {
++ case octet&0x80 == 0x00:
++ width = 1
++ case octet&0xE0 == 0xC0:
++ width = 2
++ case octet&0xF0 == 0xE0:
++ width = 3
++ case octet&0xF8 == 0xF0:
++ width = 4
++ default:
++ // The leading octet is invalid.
++ return yaml_parser_set_reader_error(parser,
++ "invalid leading UTF-8 octet",
++ parser.offset, int(octet))
++ }
++
++ // Check if the raw buffer contains an incomplete character.
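++ // For example (illustrative), if the raw buffer currently ends with
++ // 0xE2 0x82, the first two bytes of the three-byte sequence for
++ // U+20AC ('€', 0xE2 0x82 0xAC), then width (3) exceeds raw_unread (2)
++ // and we break out to refill the raw buffer before decoding.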
++ if width > raw_unread { ++ if parser.eof { ++ return yaml_parser_set_reader_error(parser, ++ "incomplete UTF-8 octet sequence", ++ parser.offset, -1) ++ } ++ break inner ++ } ++ ++ // Decode the leading octet. ++ switch { ++ case octet&0x80 == 0x00: ++ value = rune(octet & 0x7F) ++ case octet&0xE0 == 0xC0: ++ value = rune(octet & 0x1F) ++ case octet&0xF0 == 0xE0: ++ value = rune(octet & 0x0F) ++ case octet&0xF8 == 0xF0: ++ value = rune(octet & 0x07) ++ default: ++ value = 0 ++ } ++ ++ // Check and decode the trailing octets. ++ for k := 1; k < width; k++ { ++ octet = parser.raw_buffer[parser.raw_buffer_pos+k] ++ ++ // Check if the octet is valid. ++ if (octet & 0xC0) != 0x80 { ++ return yaml_parser_set_reader_error(parser, ++ "invalid trailing UTF-8 octet", ++ parser.offset+k, int(octet)) ++ } ++ ++ // Decode the octet. ++ value = (value << 6) + rune(octet&0x3F) ++ } ++ ++ // Check the length of the sequence against the value. ++ switch { ++ case width == 1: ++ case width == 2 && value >= 0x80: ++ case width == 3 && value >= 0x800: ++ case width == 4 && value >= 0x10000: ++ default: ++ return yaml_parser_set_reader_error(parser, ++ "invalid length of a UTF-8 sequence", ++ parser.offset, -1) ++ } ++ ++ // Check the range of the value. ++ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { ++ return yaml_parser_set_reader_error(parser, ++ "invalid Unicode character", ++ parser.offset, int(value)) ++ } ++ ++ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: ++ var low, high int ++ if parser.encoding == yaml_UTF16LE_ENCODING { ++ low, high = 0, 1 ++ } else { ++ low, high = 1, 0 ++ } ++ ++ // The UTF-16 encoding is not as simple as one might ++ // naively think. Check RFC 2781 ++ // (http://www.ietf.org/rfc/rfc2781.txt). ++ // ++ // Normally, two subsequent bytes describe a Unicode ++ // character. However a special technique (called a ++ // surrogate pair) is used for specifying character ++ // values larger than 0xFFFF. ++ // ++ // A surrogate pair consists of two pseudo-characters: ++ // high surrogate area (0xD800-0xDBFF) ++ // low surrogate area (0xDC00-0xDFFF) ++ // ++ // The following formulas are used for decoding ++ // and encoding characters using surrogate pairs: ++ // ++ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) ++ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) ++ // W1 = 110110yyyyyyyyyy ++ // W2 = 110111xxxxxxxxxx ++ // ++ // where U is the character value, W1 is the high surrogate ++ // area, W2 is the low surrogate area. ++ ++ // Check for incomplete UTF-16 character. ++ if raw_unread < 2 { ++ if parser.eof { ++ return yaml_parser_set_reader_error(parser, ++ "incomplete UTF-16 character", ++ parser.offset, -1) ++ } ++ break inner ++ } ++ ++ // Get the character. ++ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + ++ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) ++ ++ // Check for unexpected low surrogate area. ++ if value&0xFC00 == 0xDC00 { ++ return yaml_parser_set_reader_error(parser, ++ "unexpected low surrogate area", ++ parser.offset, int(value)) ++ } ++ ++ // Check for a high surrogate area. ++ if value&0xFC00 == 0xD800 { ++ width = 4 ++ ++ // Check for incomplete surrogate pair. ++ if raw_unread < 4 { ++ if parser.eof { ++ return yaml_parser_set_reader_error(parser, ++ "incomplete UTF-16 surrogate pair", ++ parser.offset, -1) ++ } ++ break inner ++ } ++ ++ // Get the next character. 
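++				// (Worked example of the formulas above: U+1F600 arrives as
++				// the surrogate pair W1=0xD83D, W2=0xDE00, which decodes as
++				// 0x10000 + ((0xD83D&0x3FF)<<10) + (0xDE00&0x3FF) = 0x1F600.)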
++ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + ++ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) ++ ++ // Check for a low surrogate area. ++ if value2&0xFC00 != 0xDC00 { ++ return yaml_parser_set_reader_error(parser, ++ "expected low surrogate area", ++ parser.offset+2, int(value2)) ++ } ++ ++ // Generate the value of the surrogate pair. ++ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) ++ } else { ++ width = 2 ++ } ++ ++ default: ++ panic("impossible") ++ } ++ ++ // Check if the character is in the allowed range: ++ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) ++ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) ++ // | [#x10000-#x10FFFF] (32 bit) ++ switch { ++ case value == 0x09: ++ case value == 0x0A: ++ case value == 0x0D: ++ case value >= 0x20 && value <= 0x7E: ++ case value == 0x85: ++ case value >= 0xA0 && value <= 0xD7FF: ++ case value >= 0xE000 && value <= 0xFFFD: ++ case value >= 0x10000 && value <= 0x10FFFF: ++ default: ++ return yaml_parser_set_reader_error(parser, ++ "control characters are not allowed", ++ parser.offset, int(value)) ++ } ++ ++ // Move the raw pointers. ++ parser.raw_buffer_pos += width ++ parser.offset += width ++ ++ // Finally put the character into the buffer. ++ if value <= 0x7F { ++ // 0000 0000-0000 007F . 0xxxxxxx ++ parser.buffer[buffer_len+0] = byte(value) ++ buffer_len += 1 ++ } else if value <= 0x7FF { ++ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx ++ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) ++ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) ++ buffer_len += 2 ++ } else if value <= 0xFFFF { ++ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx ++ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) ++ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) ++ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) ++ buffer_len += 3 ++ } else { ++ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx ++ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) ++ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) ++ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) ++ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) ++ buffer_len += 4 ++ } ++ ++ parser.unread++ ++ } ++ ++ // On EOF, put NUL into the buffer and return. ++ if parser.eof { ++ parser.buffer[buffer_len] = 0 ++ buffer_len++ ++ parser.unread++ ++ break ++ } ++ } ++ // [Go] Read the documentation of this function above. To return true, ++ // we need to have the given length in the buffer. Not doing that means ++ // every single check that calls this function to make sure the buffer ++ // has a given length is Go) panicking; or C) accessing invalid memory. ++ // This happens here due to the EOF above breaking early. ++ for buffer_len < length { ++ parser.buffer[buffer_len] = 0 ++ buffer_len++ ++ } ++ parser.buffer = parser.buffer[:buffer_len] ++ return true ++} +diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go +new file mode 100644 +index 000000000000..64ae888057a5 +--- /dev/null ++++ b/vendor/gopkg.in/yaml.v3/resolve.go +@@ -0,0 +1,326 @@ ++// ++// Copyright (c) 2011-2019 Canonical Ltd ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. 
++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package yaml ++ ++import ( ++ "encoding/base64" ++ "math" ++ "regexp" ++ "strconv" ++ "strings" ++ "time" ++) ++ ++type resolveMapItem struct { ++ value interface{} ++ tag string ++} ++ ++var resolveTable = make([]byte, 256) ++var resolveMap = make(map[string]resolveMapItem) ++ ++func init() { ++ t := resolveTable ++ t[int('+')] = 'S' // Sign ++ t[int('-')] = 'S' ++ for _, c := range "0123456789" { ++ t[int(c)] = 'D' // Digit ++ } ++ for _, c := range "yYnNtTfFoO~" { ++ t[int(c)] = 'M' // In map ++ } ++ t[int('.')] = '.' // Float (potentially in map) ++ ++ var resolveMapList = []struct { ++ v interface{} ++ tag string ++ l []string ++ }{ ++ {true, boolTag, []string{"true", "True", "TRUE"}}, ++ {false, boolTag, []string{"false", "False", "FALSE"}}, ++ {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, ++ {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, ++ {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, ++ {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, ++ {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, ++ {"<<", mergeTag, []string{"<<"}}, ++ } ++ ++ m := resolveMap ++ for _, item := range resolveMapList { ++ for _, s := range item.l { ++ m[s] = resolveMapItem{item.v, item.tag} ++ } ++ } ++} ++ ++const ( ++ nullTag = "!!null" ++ boolTag = "!!bool" ++ strTag = "!!str" ++ intTag = "!!int" ++ floatTag = "!!float" ++ timestampTag = "!!timestamp" ++ seqTag = "!!seq" ++ mapTag = "!!map" ++ binaryTag = "!!binary" ++ mergeTag = "!!merge" ++) ++ ++var longTags = make(map[string]string) ++var shortTags = make(map[string]string) ++ ++func init() { ++ for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { ++ ltag := longTag(stag) ++ longTags[stag] = ltag ++ shortTags[ltag] = stag ++ } ++} ++ ++const longTagPrefix = "tag:yaml.org,2002:" ++ ++func shortTag(tag string) string { ++ if strings.HasPrefix(tag, longTagPrefix) { ++ if stag, ok := shortTags[tag]; ok { ++ return stag ++ } ++ return "!!" 
+ tag[len(longTagPrefix):] ++ } ++ return tag ++} ++ ++func longTag(tag string) string { ++ if strings.HasPrefix(tag, "!!") { ++ if ltag, ok := longTags[tag]; ok { ++ return ltag ++ } ++ return longTagPrefix + tag[2:] ++ } ++ return tag ++} ++ ++func resolvableTag(tag string) bool { ++ switch tag { ++ case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: ++ return true ++ } ++ return false ++} ++ ++var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) ++ ++func resolve(tag string, in string) (rtag string, out interface{}) { ++ tag = shortTag(tag) ++ if !resolvableTag(tag) { ++ return tag, in ++ } ++ ++ defer func() { ++ switch tag { ++ case "", rtag, strTag, binaryTag: ++ return ++ case floatTag: ++ if rtag == intTag { ++ switch v := out.(type) { ++ case int64: ++ rtag = floatTag ++ out = float64(v) ++ return ++ case int: ++ rtag = floatTag ++ out = float64(v) ++ return ++ } ++ } ++ } ++ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) ++ }() ++ ++ // Any data is accepted as a !!str or !!binary. ++ // Otherwise, the prefix is enough of a hint about what it might be. ++ hint := byte('N') ++ if in != "" { ++ hint = resolveTable[in[0]] ++ } ++ if hint != 0 && tag != strTag && tag != binaryTag { ++ // Handle things we can lookup in a map. ++ if item, ok := resolveMap[in]; ok { ++ return item.tag, item.value ++ } ++ ++ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and ++ // are purposefully unsupported here. They're still quoted on ++ // the way out for compatibility with other parser, though. ++ ++ switch hint { ++ case 'M': ++ // We've already checked the map above. ++ ++ case '.': ++ // Not in the map, so maybe a normal float. ++ floatv, err := strconv.ParseFloat(in, 64) ++ if err == nil { ++ return floatTag, floatv ++ } ++ ++ case 'D', 'S': ++ // Int, float, or timestamp. ++ // Only try values as a timestamp if the value is unquoted or there's an explicit ++ // !!timestamp tag. ++ if tag == "" || tag == timestampTag { ++ t, ok := parseTimestamp(in) ++ if ok { ++ return timestampTag, t ++ } ++ } ++ ++ plain := strings.Replace(in, "_", "", -1) ++ intv, err := strconv.ParseInt(plain, 0, 64) ++ if err == nil { ++ if intv == int64(int(intv)) { ++ return intTag, int(intv) ++ } else { ++ return intTag, intv ++ } ++ } ++ uintv, err := strconv.ParseUint(plain, 0, 64) ++ if err == nil { ++ return intTag, uintv ++ } ++ if yamlStyleFloat.MatchString(plain) { ++ floatv, err := strconv.ParseFloat(plain, 64) ++ if err == nil { ++ return floatTag, floatv ++ } ++ } ++ if strings.HasPrefix(plain, "0b") { ++ intv, err := strconv.ParseInt(plain[2:], 2, 64) ++ if err == nil { ++ if intv == int64(int(intv)) { ++ return intTag, int(intv) ++ } else { ++ return intTag, intv ++ } ++ } ++ uintv, err := strconv.ParseUint(plain[2:], 2, 64) ++ if err == nil { ++ return intTag, uintv ++ } ++ } else if strings.HasPrefix(plain, "-0b") { ++ intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) ++ if err == nil { ++ if true || intv == int64(int(intv)) { ++ return intTag, int(intv) ++ } else { ++ return intTag, intv ++ } ++ } ++ } ++ // Octals as introduced in version 1.2 of the spec. ++ // Octals from the 1.1 spec, spelled as 0777, are still ++ // decoded by default in v3 as well for compatibility. ++ // May be dropped in v4 depending on how usage evolves. 
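++			// Worked example (editorial sketch, not upstream text): "0o14"
++			// resolves to !!int 12. Note that strconv.ParseInt with base 0
++			// on current Go already accepts the "0o" prefix, so the explicit
++			// base-8 branch below appears to serve mostly as a fallback for
++			// older toolchains.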
++			if strings.HasPrefix(plain, "0o") {
++				intv, err := strconv.ParseInt(plain[2:], 8, 64)
++				if err == nil {
++					if intv == int64(int(intv)) {
++						return intTag, int(intv)
++					} else {
++						return intTag, intv
++					}
++				}
++				uintv, err := strconv.ParseUint(plain[2:], 8, 64)
++				if err == nil {
++					return intTag, uintv
++				}
++			} else if strings.HasPrefix(plain, "-0o") {
++				intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
++				if err == nil {
++					if true || intv == int64(int(intv)) {
++						return intTag, int(intv)
++					} else {
++						return intTag, intv
++					}
++				}
++			}
++		default:
++			panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
++		}
++	}
++	return strTag, in
++}
++
++// encodeBase64 encodes s as base64 that is broken up into multiple lines
++// as appropriate for the resulting length.
++func encodeBase64(s string) string {
++	const lineLen = 70
++	encLen := base64.StdEncoding.EncodedLen(len(s))
++	lines := encLen/lineLen + 1
++	buf := make([]byte, encLen*2+lines)
++	in := buf[0:encLen]
++	out := buf[encLen:]
++	base64.StdEncoding.Encode(in, []byte(s))
++	k := 0
++	for i := 0; i < len(in); i += lineLen {
++		j := i + lineLen
++		if j > len(in) {
++			j = len(in)
++		}
++		k += copy(out[k:], in[i:j])
++		if lines > 1 {
++			out[k] = '\n'
++			k++
++		}
++	}
++	return string(out[:k])
++}
++
++// This is a subset of the formats allowed by the regular expression
++// defined at http://yaml.org/type/timestamp.html.
++var allowedTimestampFormats = []string{
++	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
++	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
++	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
++	"2006-1-2",                        // date only
++	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
++	// from the set of examples.
++}
++
++// parseTimestamp parses s as a timestamp string and
++// returns the timestamp and reports whether it succeeded.
++// Timestamp formats are defined at http://yaml.org/type/timestamp.html
++func parseTimestamp(s string) (time.Time, bool) {
++	// TODO write code to check all the formats supported by
++	// http://yaml.org/type/timestamp.html instead of using time.Parse.
++
++	// Quick check: all date formats start with YYYY-.
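++	// For example, "2001-12-14 21:59:43.10" passes the check below (four
++	// digits, then '-') and then matches the space-separated format above,
++	// while "12:30:45" fails the quick check and is rejected immediately.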
++	i := 0
++	for ; i < len(s); i++ {
++		if c := s[i]; c < '0' || c > '9' {
++			break
++		}
++	}
++	if i != 4 || i == len(s) || s[i] != '-' {
++		return time.Time{}, false
++	}
++	for _, format := range allowedTimestampFormats {
++		if t, err := time.Parse(format, s); err == nil {
++			return t, true
++		}
++	}
++	return time.Time{}, false
++}
+diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go
+new file mode 100644
+index 000000000000..ca0070108f4e
+--- /dev/null
++++ b/vendor/gopkg.in/yaml.v3/scannerc.go
+@@ -0,0 +1,3038 @@
++//
++// Copyright (c) 2011-2019 Canonical Ltd
++// Copyright (c) 2006-2010 Kirill Simonov
++//
++// Permission is hereby granted, free of charge, to any person obtaining a copy of
++// this software and associated documentation files (the "Software"), to deal in
++// the Software without restriction, including without limitation the rights to
++// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++// of the Software, and to permit persons to whom the Software is furnished to do
++// so, subject to the following conditions:
++//
++// The above copyright notice and this permission notice shall be included in all
++// copies or substantial portions of the Software.
++//
++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++// SOFTWARE.
++
++package yaml
++
++import (
++	"bytes"
++	"fmt"
++)
++
++// Introduction
++// ************
++//
++// The following notes assume that you are familiar with the YAML specification
++// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
++// some cases we are less restrictive than it requires.
++//
++// The process of transforming a YAML stream into a sequence of events is
++// divided into two steps: Scanning and Parsing.
++//
++// The Scanner transforms the input stream into a sequence of tokens, while the
++// Parser transforms the sequence of tokens produced by the Scanner into a
++// sequence of parsing events.
++//
++// The Scanner is rather clever and complicated. The Parser, on the contrary,
++// is a straightforward implementation of a recursive-descent parser (or,
++// as it is usually called, an LL(1) parser).
++//
++// Actually, there are two aspects of Scanning that might be called "clever";
++// the rest is quite straightforward. The issues are "block collection start"
++// and "simple keys". Both are explained below in detail.
++//
++// Here the Scanning step is explained and implemented. We start with the list
++// of all the tokens produced by the Scanner together with short descriptions.
++//
++// Now, tokens:
++//
++//      STREAM-START(encoding)          # The stream start.
++//      STREAM-END                      # The stream end.
++//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
++//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
++//      DOCUMENT-START                  # '---'
++//      DOCUMENT-END                    # '...'
++//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
++//      BLOCK-MAPPING-START             # sequence or a block mapping.
++//      BLOCK-END                       # Indentation decrease.
++//      FLOW-SEQUENCE-START             # '['
++//      FLOW-SEQUENCE-END               # ']'
++//      FLOW-MAPPING-START              # '{'
++//      FLOW-MAPPING-END                # '}'
++//      BLOCK-ENTRY                     # '-'
++//      FLOW-ENTRY                      # ','
++//      KEY                             # '?' or nothing (simple keys).
++//      VALUE                           # ':'
++//      ALIAS(anchor)                   # '*anchor'
++//      ANCHOR(anchor)                  # '&anchor'
++//      TAG(handle,suffix)              # '!handle!suffix'
++//      SCALAR(value,style)             # A scalar.
++//
++// The following two tokens are "virtual" tokens denoting the beginning and the
++// end of the stream:
++//
++//      STREAM-START(encoding)
++//      STREAM-END
++//
++// We pass the information about the input stream encoding with the
++// STREAM-START token.
++//
++// The next two tokens are responsible for directives:
++//
++//      VERSION-DIRECTIVE(major,minor)
++//      TAG-DIRECTIVE(handle,prefix)
++//
++// Example:
++//
++//      %YAML   1.1
++//      %TAG    !   !foo
++//      %TAG    !yaml!  tag:yaml.org,2002:
++//      ---
++//
++// The corresponding sequence of tokens:
++//
++//      STREAM-START(utf-8)
++//      VERSION-DIRECTIVE(1,1)
++//      TAG-DIRECTIVE("!","!foo")
++//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
++//      DOCUMENT-START
++//      STREAM-END
++//
++// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
++// line.
++//
++// The document start and end indicators are represented by:
++//
++//      DOCUMENT-START
++//      DOCUMENT-END
++//
++// Note that if a YAML stream contains an implicit document (without '---'
++// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
++// produced.
++//
++// In the following examples, we present whole documents together with the
++// produced tokens.
++//
++// 1. An implicit document:
++//
++//      'a scalar'
++//
++// Tokens:
++//
++//      STREAM-START(utf-8)
++//      SCALAR("a scalar",single-quoted)
++//      STREAM-END
++//
++// 2. An explicit document:
++//
++//      ---
++//      'a scalar'
++//      ...
++//
++// Tokens:
++//
++//      STREAM-START(utf-8)
++//      DOCUMENT-START
++//      SCALAR("a scalar",single-quoted)
++//      DOCUMENT-END
++//      STREAM-END
++//
++// 3. Several documents in a stream:
++//
++//      'a scalar'
++//      ---
++//      'another scalar'
++//      ---
++//      'yet another scalar'
++//
++// Tokens:
++//
++//      STREAM-START(utf-8)
++//      SCALAR("a scalar",single-quoted)
++//      DOCUMENT-START
++//      SCALAR("another scalar",single-quoted)
++//      DOCUMENT-START
++//      SCALAR("yet another scalar",single-quoted)
++//      STREAM-END
++//
++// We have already introduced the SCALAR token above. The following tokens are
++// used to describe aliases, anchors, tags, and scalars:
++//
++//      ALIAS(anchor)
++//      ANCHOR(anchor)
++//      TAG(handle,suffix)
++//      SCALAR(value,style)
++//
++// The following series of examples illustrates the usage of these tokens:
++//
++// 1. A recursive sequence:
++//
++//      &A [ *A ]
++//
++// Tokens:
++//
++//      STREAM-START(utf-8)
++//      ANCHOR("A")
++//      FLOW-SEQUENCE-START
++//      ALIAS("A")
++//      FLOW-SEQUENCE-END
++//      STREAM-END
++//
++// 2. A tagged scalar:
++//
++//      !!float "3.14"  # A good approximation.
++//
++// Tokens:
++//
++//      STREAM-START(utf-8)
++//      TAG("!!","float")
++//      SCALAR("3.14",double-quoted)
++//      STREAM-END
++//
++// 3. Various scalar styles:
++//
++//      --- # Implicit empty plain scalars do not produce tokens.
++//      --- a plain scalar
++//      --- 'a single-quoted scalar'
++//      --- "a double-quoted scalar"
++//      --- |-
++//        a literal scalar
++//      --- >-
++//        a folded
++//        scalar
++//
++// Tokens:
++//
++//      STREAM-START(utf-8)
++//      DOCUMENT-START
++//      DOCUMENT-START
++//      SCALAR("a plain scalar",plain)
++//      DOCUMENT-START
++//      SCALAR("a single-quoted scalar",single-quoted)
++//      DOCUMENT-START
++//      SCALAR("a double-quoted scalar",double-quoted)
++//      DOCUMENT-START
++//      SCALAR("a literal scalar",literal)
++//      DOCUMENT-START
++//      SCALAR("a folded scalar",folded)
++//      STREAM-END
++//
++// Now it's time to review collection-related tokens. We will start with
++// flow collections:
++//
++//      FLOW-SEQUENCE-START
++//      FLOW-SEQUENCE-END
++//      FLOW-MAPPING-START
++//      FLOW-MAPPING-END
++//      FLOW-ENTRY
++//      KEY
++//      VALUE
++//
++// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
++// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
++// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
++// indicators '?' and ':', which are used for denoting mapping keys and values,
++// are represented by the KEY and VALUE tokens.
++//
++// The following examples show flow collections:
++//
++// 1. A flow sequence:
++//
++//      [item 1, item 2, item 3]
++//
++// Tokens:
++//
++//      STREAM-START(utf-8)
++//      FLOW-SEQUENCE-START
++//      SCALAR("item 1",plain)
++//      FLOW-ENTRY
++//      SCALAR("item 2",plain)
++//      FLOW-ENTRY
++//      SCALAR("item 3",plain)
++//      FLOW-SEQUENCE-END
++//      STREAM-END
++//
++// 2. A flow mapping:
++//
++//      {
++//          a simple key: a value,  # Note that the KEY token is produced.
++//          ? a complex key: another value,
++//      }
++//
++// Tokens:
++//
++//      STREAM-START(utf-8)
++//      FLOW-MAPPING-START
++//      KEY
++//      SCALAR("a simple key",plain)
++//      VALUE
++//      SCALAR("a value",plain)
++//      FLOW-ENTRY
++//      KEY
++//      SCALAR("a complex key",plain)
++//      VALUE
++//      SCALAR("another value",plain)
++//      FLOW-ENTRY
++//      FLOW-MAPPING-END
++//      STREAM-END
++//
++// A simple key is a key which is not denoted by the '?' indicator. Note that
++// the Scanner still produces the KEY token whenever it encounters a simple key.
++//
++// For scanning block collections, the following tokens are used (note that we
++// repeat KEY and VALUE here):
++//
++//      BLOCK-SEQUENCE-START
++//      BLOCK-MAPPING-START
++//      BLOCK-END
++//      BLOCK-ENTRY
++//      KEY
++//      VALUE
++//
++// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
++// indentation increase that precedes a block collection (cf. the INDENT token
++// in Python). The token BLOCK-END denotes the indentation decrease that ends
++// a block collection (cf. the DEDENT token in Python). However, YAML has some
++// syntax peculiarities that make detection of these tokens more complex.
++//
++// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
++// '-', '?', and ':' respectively.
++//
++// The following examples show how the tokens BLOCK-SEQUENCE-START,
++// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
++//
++// 1.
Block sequences: ++// ++// - item 1 ++// - item 2 ++// - ++// - item 3.1 ++// - item 3.2 ++// - ++// key 1: value 1 ++// key 2: value 2 ++// ++// Tokens: ++// ++// STREAM-START(utf-8) ++// BLOCK-SEQUENCE-START ++// BLOCK-ENTRY ++// SCALAR("item 1",plain) ++// BLOCK-ENTRY ++// SCALAR("item 2",plain) ++// BLOCK-ENTRY ++// BLOCK-SEQUENCE-START ++// BLOCK-ENTRY ++// SCALAR("item 3.1",plain) ++// BLOCK-ENTRY ++// SCALAR("item 3.2",plain) ++// BLOCK-END ++// BLOCK-ENTRY ++// BLOCK-MAPPING-START ++// KEY ++// SCALAR("key 1",plain) ++// VALUE ++// SCALAR("value 1",plain) ++// KEY ++// SCALAR("key 2",plain) ++// VALUE ++// SCALAR("value 2",plain) ++// BLOCK-END ++// BLOCK-END ++// STREAM-END ++// ++// 2. Block mappings: ++// ++// a simple key: a value # The KEY token is produced here. ++// ? a complex key ++// : another value ++// a mapping: ++// key 1: value 1 ++// key 2: value 2 ++// a sequence: ++// - item 1 ++// - item 2 ++// ++// Tokens: ++// ++// STREAM-START(utf-8) ++// BLOCK-MAPPING-START ++// KEY ++// SCALAR("a simple key",plain) ++// VALUE ++// SCALAR("a value",plain) ++// KEY ++// SCALAR("a complex key",plain) ++// VALUE ++// SCALAR("another value",plain) ++// KEY ++// SCALAR("a mapping",plain) ++// BLOCK-MAPPING-START ++// KEY ++// SCALAR("key 1",plain) ++// VALUE ++// SCALAR("value 1",plain) ++// KEY ++// SCALAR("key 2",plain) ++// VALUE ++// SCALAR("value 2",plain) ++// BLOCK-END ++// KEY ++// SCALAR("a sequence",plain) ++// VALUE ++// BLOCK-SEQUENCE-START ++// BLOCK-ENTRY ++// SCALAR("item 1",plain) ++// BLOCK-ENTRY ++// SCALAR("item 2",plain) ++// BLOCK-END ++// BLOCK-END ++// STREAM-END ++// ++// YAML does not always require to start a new block collection from a new ++// line. If the current line contains only '-', '?', and ':' indicators, a new ++// block collection may start at the current line. The following examples ++// illustrate this case: ++// ++// 1. Collections in a sequence: ++// ++// - - item 1 ++// - item 2 ++// - key 1: value 1 ++// key 2: value 2 ++// - ? complex key ++// : complex value ++// ++// Tokens: ++// ++// STREAM-START(utf-8) ++// BLOCK-SEQUENCE-START ++// BLOCK-ENTRY ++// BLOCK-SEQUENCE-START ++// BLOCK-ENTRY ++// SCALAR("item 1",plain) ++// BLOCK-ENTRY ++// SCALAR("item 2",plain) ++// BLOCK-END ++// BLOCK-ENTRY ++// BLOCK-MAPPING-START ++// KEY ++// SCALAR("key 1",plain) ++// VALUE ++// SCALAR("value 1",plain) ++// KEY ++// SCALAR("key 2",plain) ++// VALUE ++// SCALAR("value 2",plain) ++// BLOCK-END ++// BLOCK-ENTRY ++// BLOCK-MAPPING-START ++// KEY ++// SCALAR("complex key") ++// VALUE ++// SCALAR("complex value") ++// BLOCK-END ++// BLOCK-END ++// STREAM-END ++// ++// 2. Collections in a mapping: ++// ++// ? a sequence ++// : - item 1 ++// - item 2 ++// ? a mapping ++// : key 1: value 1 ++// key 2: value 2 ++// ++// Tokens: ++// ++// STREAM-START(utf-8) ++// BLOCK-MAPPING-START ++// KEY ++// SCALAR("a sequence",plain) ++// VALUE ++// BLOCK-SEQUENCE-START ++// BLOCK-ENTRY ++// SCALAR("item 1",plain) ++// BLOCK-ENTRY ++// SCALAR("item 2",plain) ++// BLOCK-END ++// KEY ++// SCALAR("a mapping",plain) ++// VALUE ++// BLOCK-MAPPING-START ++// KEY ++// SCALAR("key 1",plain) ++// VALUE ++// SCALAR("value 1",plain) ++// KEY ++// SCALAR("key 2",plain) ++// VALUE ++// SCALAR("value 2",plain) ++// BLOCK-END ++// BLOCK-END ++// STREAM-END ++// ++// YAML also permits non-indented sequences if they are included into a block ++// mapping. 
In this case, the token BLOCK-SEQUENCE-START is not produced: ++// ++// key: ++// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. ++// - item 2 ++// ++// Tokens: ++// ++// STREAM-START(utf-8) ++// BLOCK-MAPPING-START ++// KEY ++// SCALAR("key",plain) ++// VALUE ++// BLOCK-ENTRY ++// SCALAR("item 1",plain) ++// BLOCK-ENTRY ++// SCALAR("item 2",plain) ++// BLOCK-END ++// ++ ++// Ensure that the buffer contains the required number of characters. ++// Return true on success, false on failure (reader error or memory error). ++func cache(parser *yaml_parser_t, length int) bool { ++ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) ++ return parser.unread >= length || yaml_parser_update_buffer(parser, length) ++} ++ ++// Advance the buffer pointer. ++func skip(parser *yaml_parser_t) { ++ if !is_blank(parser.buffer, parser.buffer_pos) { ++ parser.newlines = 0 ++ } ++ parser.mark.index++ ++ parser.mark.column++ ++ parser.unread-- ++ parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) ++} ++ ++func skip_line(parser *yaml_parser_t) { ++ if is_crlf(parser.buffer, parser.buffer_pos) { ++ parser.mark.index += 2 ++ parser.mark.column = 0 ++ parser.mark.line++ ++ parser.unread -= 2 ++ parser.buffer_pos += 2 ++ parser.newlines++ ++ } else if is_break(parser.buffer, parser.buffer_pos) { ++ parser.mark.index++ ++ parser.mark.column = 0 ++ parser.mark.line++ ++ parser.unread-- ++ parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) ++ parser.newlines++ ++ } ++} ++ ++// Copy a character to a string buffer and advance pointers. ++func read(parser *yaml_parser_t, s []byte) []byte { ++ if !is_blank(parser.buffer, parser.buffer_pos) { ++ parser.newlines = 0 ++ } ++ w := width(parser.buffer[parser.buffer_pos]) ++ if w == 0 { ++ panic("invalid character sequence") ++ } ++ if len(s) == 0 { ++ s = make([]byte, 0, 32) ++ } ++ if w == 1 && len(s)+w <= cap(s) { ++ s = s[:len(s)+1] ++ s[len(s)-1] = parser.buffer[parser.buffer_pos] ++ parser.buffer_pos++ ++ } else { ++ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) ++ parser.buffer_pos += w ++ } ++ parser.mark.index++ ++ parser.mark.column++ ++ parser.unread-- ++ return s ++} ++ ++// Copy a line break character to a string buffer and advance pointers. ++func read_line(parser *yaml_parser_t, s []byte) []byte { ++ buf := parser.buffer ++ pos := parser.buffer_pos ++ switch { ++ case buf[pos] == '\r' && buf[pos+1] == '\n': ++ // CR LF . LF ++ s = append(s, '\n') ++ parser.buffer_pos += 2 ++ parser.mark.index++ ++ parser.unread-- ++ case buf[pos] == '\r' || buf[pos] == '\n': ++ // CR|LF . LF ++ s = append(s, '\n') ++ parser.buffer_pos += 1 ++ case buf[pos] == '\xC2' && buf[pos+1] == '\x85': ++ // NEL . LF ++ s = append(s, '\n') ++ parser.buffer_pos += 2 ++ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): ++ // LS|PS . LS|PS ++ s = append(s, buf[parser.buffer_pos:pos+3]...) ++ parser.buffer_pos += 3 ++ default: ++ return s ++ } ++ parser.mark.index++ ++ parser.mark.column = 0 ++ parser.mark.line++ ++ parser.unread-- ++ parser.newlines++ ++ return s ++} ++ ++// Get the next token. ++func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { ++ // Erase the token object. ++ *token = yaml_token_t{} // [Go] Is this necessary? ++ ++ // No tokens after STREAM-END or error. ++ if parser.stream_end_produced || parser.error != yaml_NO_ERROR { ++ return true ++ } ++ ++ // Ensure that the tokens queue contains enough tokens. 
++ if !parser.token_available { ++ if !yaml_parser_fetch_more_tokens(parser) { ++ return false ++ } ++ } ++ ++ // Fetch the next token from the queue. ++ *token = parser.tokens[parser.tokens_head] ++ parser.tokens_head++ ++ parser.tokens_parsed++ ++ parser.token_available = false ++ ++ if token.typ == yaml_STREAM_END_TOKEN { ++ parser.stream_end_produced = true ++ } ++ return true ++} ++ ++// Set the scanner error and return false. ++func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { ++ parser.error = yaml_SCANNER_ERROR ++ parser.context = context ++ parser.context_mark = context_mark ++ parser.problem = problem ++ parser.problem_mark = parser.mark ++ return false ++} ++ ++func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { ++ context := "while parsing a tag" ++ if directive { ++ context = "while parsing a %TAG directive" ++ } ++ return yaml_parser_set_scanner_error(parser, context, context_mark, problem) ++} ++ ++func trace(args ...interface{}) func() { ++ pargs := append([]interface{}{"+++"}, args...) ++ fmt.Println(pargs...) ++ pargs = append([]interface{}{"---"}, args...) ++ return func() { fmt.Println(pargs...) } ++} ++ ++// Ensure that the tokens queue contains at least one token which can be ++// returned to the Parser. ++func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { ++ // While we need more tokens to fetch, do it. ++ for { ++ // [Go] The comment parsing logic requires a lookahead of two tokens ++ // so that foot comments may be parsed in time of associating them ++ // with the tokens that are parsed before them, and also for line ++ // comments to be transformed into head comments in some edge cases. ++ if parser.tokens_head < len(parser.tokens)-2 { ++ // If a potential simple key is at the head position, we need to fetch ++ // the next token to disambiguate it. ++ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] ++ if !ok { ++ break ++ } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { ++ return false ++ } else if !valid { ++ break ++ } ++ } ++ // Fetch the next token. ++ if !yaml_parser_fetch_next_token(parser) { ++ return false ++ } ++ } ++ ++ parser.token_available = true ++ return true ++} ++ ++// The dispatcher for token fetchers. ++func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { ++ // Ensure that the buffer is initialized. ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ ++ // Check if we just started scanning. Fetch STREAM-START then. ++ if !parser.stream_start_produced { ++ return yaml_parser_fetch_stream_start(parser) ++ } ++ ++ scan_mark := parser.mark ++ ++ // Eat whitespaces and comments until we reach the next token. ++ if !yaml_parser_scan_to_next_token(parser) { ++ return false ++ } ++ ++ // [Go] While unrolling indents, transform the head comments of prior ++ // indentation levels observed after scan_start into foot comments at ++ // the respective indexes. ++ ++ // Check the indentation level against the current column. ++ if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { ++ return false ++ } ++ ++ // Ensure that the buffer contains at least 4 characters. 4 is the length ++ // of the longest indicators ('--- ' and '... '). ++ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { ++ return false ++ } ++ ++ // Is it the end of the stream? 
++ if is_z(parser.buffer, parser.buffer_pos) { ++ return yaml_parser_fetch_stream_end(parser) ++ } ++ ++ // Is it a directive? ++ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { ++ return yaml_parser_fetch_directive(parser) ++ } ++ ++ buf := parser.buffer ++ pos := parser.buffer_pos ++ ++ // Is it the document start indicator? ++ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { ++ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) ++ } ++ ++ // Is it the document end indicator? ++ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { ++ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) ++ } ++ ++ comment_mark := parser.mark ++ if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { ++ // Associate any following comments with the prior token. ++ comment_mark = parser.tokens[len(parser.tokens)-1].start_mark ++ } ++ defer func() { ++ if !ok { ++ return ++ } ++ if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { ++ // Sequence indicators alone have no line comments. It becomes ++ // a head comment for whatever follows. ++ return ++ } ++ if !yaml_parser_scan_line_comment(parser, comment_mark) { ++ ok = false ++ return ++ } ++ }() ++ ++ // Is it the flow sequence start indicator? ++ if buf[pos] == '[' { ++ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) ++ } ++ ++ // Is it the flow mapping start indicator? ++ if parser.buffer[parser.buffer_pos] == '{' { ++ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) ++ } ++ ++ // Is it the flow sequence end indicator? ++ if parser.buffer[parser.buffer_pos] == ']' { ++ return yaml_parser_fetch_flow_collection_end(parser, ++ yaml_FLOW_SEQUENCE_END_TOKEN) ++ } ++ ++ // Is it the flow mapping end indicator? ++ if parser.buffer[parser.buffer_pos] == '}' { ++ return yaml_parser_fetch_flow_collection_end(parser, ++ yaml_FLOW_MAPPING_END_TOKEN) ++ } ++ ++ // Is it the flow entry indicator? ++ if parser.buffer[parser.buffer_pos] == ',' { ++ return yaml_parser_fetch_flow_entry(parser) ++ } ++ ++ // Is it the block entry indicator? ++ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { ++ return yaml_parser_fetch_block_entry(parser) ++ } ++ ++ // Is it the key indicator? ++ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { ++ return yaml_parser_fetch_key(parser) ++ } ++ ++ // Is it the value indicator? ++ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { ++ return yaml_parser_fetch_value(parser) ++ } ++ ++ // Is it an alias? ++ if parser.buffer[parser.buffer_pos] == '*' { ++ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) ++ } ++ ++ // Is it an anchor? ++ if parser.buffer[parser.buffer_pos] == '&' { ++ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) ++ } ++ ++ // Is it a tag? ++ if parser.buffer[parser.buffer_pos] == '!' { ++ return yaml_parser_fetch_tag(parser) ++ } ++ ++ // Is it a literal scalar? ++ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { ++ return yaml_parser_fetch_block_scalar(parser, true) ++ } ++ ++ // Is it a folded scalar? 
++ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { ++ return yaml_parser_fetch_block_scalar(parser, false) ++ } ++ ++ // Is it a single-quoted scalar? ++ if parser.buffer[parser.buffer_pos] == '\'' { ++ return yaml_parser_fetch_flow_scalar(parser, true) ++ } ++ ++ // Is it a double-quoted scalar? ++ if parser.buffer[parser.buffer_pos] == '"' { ++ return yaml_parser_fetch_flow_scalar(parser, false) ++ } ++ ++ // Is it a plain scalar? ++ // ++ // A plain scalar may start with any non-blank characters except ++ // ++ // '-', '?', ':', ',', '[', ']', '{', '}', ++ // '#', '&', '*', '!', '|', '>', '\'', '\"', ++ // '%', '@', '`'. ++ // ++ // In the block context (and, for the '-' indicator, in the flow context ++ // too), it may also start with the characters ++ // ++ // '-', '?', ':' ++ // ++ // if it is followed by a non-space character. ++ // ++ // The last rule is more restrictive than the specification requires. ++ // [Go] TODO Make this logic more reasonable. ++ //switch parser.buffer[parser.buffer_pos] { ++ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': ++ //} ++ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || ++ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || ++ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || ++ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || ++ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || ++ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || ++ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || ++ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || ++ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || ++ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || ++ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || ++ (parser.flow_level == 0 && ++ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && ++ !is_blankz(parser.buffer, parser.buffer_pos+1)) { ++ return yaml_parser_fetch_plain_scalar(parser) ++ } ++ ++ // If we don't determine the token type so far, it is an error. ++ return yaml_parser_set_scanner_error(parser, ++ "while scanning for the next token", parser.mark, ++ "found character that cannot start any token") ++} ++ ++func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { ++ if !simple_key.possible { ++ return false, true ++ } ++ ++ // The 1.2 specification says: ++ // ++ // "If the ? indicator is omitted, parsing needs to see past the ++ // implicit key to recognize it as such. To limit the amount of ++ // lookahead required, the “:” indicator must appear at most 1024 ++ // Unicode characters beyond the start of the key. In addition, the key ++ // is restricted to a single line." ++ // ++ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { ++ // Check if the potential simple key to be removed is required. 
++		if simple_key.required {
++			return false, yaml_parser_set_scanner_error(parser,
++				"while scanning a simple key", simple_key.mark,
++				"could not find expected ':'")
++		}
++		simple_key.possible = false
++		return false, true
++	}
++	return true, true
++}
++
++// Check if a simple key may start at the current position and add it if
++// needed.
++func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
++	// A simple key is required at the current position if the scanner is in
++	// the block context and the current column coincides with the indentation
++	// level.
++
++	required := parser.flow_level == 0 && parser.indent == parser.mark.column
++
++	//
++	// If the current position may start a simple key, save it.
++	//
++	if parser.simple_key_allowed {
++		simple_key := yaml_simple_key_t{
++			possible:     true,
++			required:     required,
++			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
++			mark:         parser.mark,
++		}
++
++		if !yaml_parser_remove_simple_key(parser) {
++			return false
++		}
++		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
++		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
++	}
++	return true
++}
++
++// Remove a potential simple key at the current flow level.
++func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
++	i := len(parser.simple_keys) - 1
++	if parser.simple_keys[i].possible {
++		// If the key is required, it is an error.
++		if parser.simple_keys[i].required {
++			return yaml_parser_set_scanner_error(parser,
++				"while scanning a simple key", parser.simple_keys[i].mark,
++				"could not find expected ':'")
++		}
++		// Remove the key from the stack.
++		parser.simple_keys[i].possible = false
++		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
++	}
++	return true
++}
++
++// max_flow_level limits the flow_level
++const max_flow_level = 10000
++
++// Increase the flow level and resize the simple key list if needed.
++func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
++	// Reset the simple key on the next level.
++	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
++		possible:     false,
++		required:     false,
++		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
++		mark:         parser.mark,
++	})
++
++	// Increase the flow level.
++	parser.flow_level++
++	if parser.flow_level > max_flow_level {
++		return yaml_parser_set_scanner_error(parser,
++			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
++			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
++	}
++	return true
++}
++
++// Decrease the flow level.
++func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
++	if parser.flow_level > 0 {
++		parser.flow_level--
++		last := len(parser.simple_keys) - 1
++		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
++		parser.simple_keys = parser.simple_keys[:last]
++	}
++	return true
++}
++
++// max_indents limits the indents stack size
++const max_indents = 10000
++
++// Push the current indentation level to the stack and set the new level if
++// the current column is greater than the indentation level. In this case,
++// append or insert the specified token into the token queue.
++func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
++	// In the flow context, do nothing.
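++	// (Worked example, as a sketch: for "key:\n  - item",
++	// yaml_parser_fetch_block_entry calls this function with column 2 while
++	// parser.indent is 0, so indent 0 is pushed, the indent becomes 2, and a
++	// BLOCK-SEQUENCE-START token is inserted; the matching BLOCK-END is
++	// produced later by yaml_parser_unroll_indent when the indentation
++	// drops back.)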
++ if parser.flow_level > 0 { ++ return true ++ } ++ ++ if parser.indent < column { ++ // Push the current indentation level to the stack and set the new ++ // indentation level. ++ parser.indents = append(parser.indents, parser.indent) ++ parser.indent = column ++ if len(parser.indents) > max_indents { ++ return yaml_parser_set_scanner_error(parser, ++ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, ++ fmt.Sprintf("exceeded max depth of %d", max_indents)) ++ } ++ ++ // Create a token and insert it into the queue. ++ token := yaml_token_t{ ++ typ: typ, ++ start_mark: mark, ++ end_mark: mark, ++ } ++ if number > -1 { ++ number -= parser.tokens_parsed ++ } ++ yaml_insert_token(parser, number, &token) ++ } ++ return true ++} ++ ++// Pop indentation levels from the indents stack until the current level ++// becomes less or equal to the column. For each indentation level, append ++// the BLOCK-END token. ++func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { ++ // In the flow context, do nothing. ++ if parser.flow_level > 0 { ++ return true ++ } ++ ++ block_mark := scan_mark ++ block_mark.index-- ++ ++ // Loop through the indentation levels in the stack. ++ for parser.indent > column { ++ ++ // [Go] Reposition the end token before potential following ++ // foot comments of parent blocks. For that, search ++ // backwards for recent comments that were at the same ++ // indent as the block that is ending now. ++ stop_index := block_mark.index ++ for i := len(parser.comments) - 1; i >= 0; i-- { ++ comment := &parser.comments[i] ++ ++ if comment.end_mark.index < stop_index { ++ // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. ++ // If requested indent column is < 0, then the document is over and everything else ++ // is a foot anyway. ++ break ++ } ++ if comment.start_mark.column == parser.indent+1 { ++ // This is a good match. But maybe there's a former comment ++ // at that same indent level, so keep searching. ++ block_mark = comment.start_mark ++ } ++ ++ // While the end of the former comment matches with ++ // the start of the following one, we know there's ++ // nothing in between and scanning is still safe. ++ stop_index = comment.scan_mark.index ++ } ++ ++ // Create a token and append it to the queue. ++ token := yaml_token_t{ ++ typ: yaml_BLOCK_END_TOKEN, ++ start_mark: block_mark, ++ end_mark: block_mark, ++ } ++ yaml_insert_token(parser, -1, &token) ++ ++ // Pop the indentation level. ++ parser.indent = parser.indents[len(parser.indents)-1] ++ parser.indents = parser.indents[:len(parser.indents)-1] ++ } ++ return true ++} ++ ++// Initialize the scanner and produce the STREAM-START token. ++func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { ++ ++ // Set the initial indentation. ++ parser.indent = -1 ++ ++ // Initialize the simple key stack. ++ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) ++ ++ parser.simple_keys_by_tok = make(map[int]int) ++ ++ // A simple key is allowed at the beginning of the stream. ++ parser.simple_key_allowed = true ++ ++ // We have started. ++ parser.stream_start_produced = true ++ ++ // Create the STREAM-START token and append it to the queue. ++ token := yaml_token_t{ ++ typ: yaml_STREAM_START_TOKEN, ++ start_mark: parser.mark, ++ end_mark: parser.mark, ++ encoding: parser.encoding, ++ } ++ yaml_insert_token(parser, -1, &token) ++ return true ++} ++ ++// Produce the STREAM-END token and shut down the scanner. 
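++//
++// For example, scanning "a: 1" with no trailing newline ends here: the
++// function forces a final line break, unrolls the outstanding indentation
++// into a BLOCK-END token, and then appends STREAM-END.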
++func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { ++ ++ // Force new line. ++ if parser.mark.column != 0 { ++ parser.mark.column = 0 ++ parser.mark.line++ ++ } ++ ++ // Reset the indentation level. ++ if !yaml_parser_unroll_indent(parser, -1, parser.mark) { ++ return false ++ } ++ ++ // Reset simple keys. ++ if !yaml_parser_remove_simple_key(parser) { ++ return false ++ } ++ ++ parser.simple_key_allowed = false ++ ++ // Create the STREAM-END token and append it to the queue. ++ token := yaml_token_t{ ++ typ: yaml_STREAM_END_TOKEN, ++ start_mark: parser.mark, ++ end_mark: parser.mark, ++ } ++ yaml_insert_token(parser, -1, &token) ++ return true ++} ++ ++// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. ++func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { ++ // Reset the indentation level. ++ if !yaml_parser_unroll_indent(parser, -1, parser.mark) { ++ return false ++ } ++ ++ // Reset simple keys. ++ if !yaml_parser_remove_simple_key(parser) { ++ return false ++ } ++ ++ parser.simple_key_allowed = false ++ ++ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. ++ token := yaml_token_t{} ++ if !yaml_parser_scan_directive(parser, &token) { ++ return false ++ } ++ // Append the token to the queue. ++ yaml_insert_token(parser, -1, &token) ++ return true ++} ++ ++// Produce the DOCUMENT-START or DOCUMENT-END token. ++func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { ++ // Reset the indentation level. ++ if !yaml_parser_unroll_indent(parser, -1, parser.mark) { ++ return false ++ } ++ ++ // Reset simple keys. ++ if !yaml_parser_remove_simple_key(parser) { ++ return false ++ } ++ ++ parser.simple_key_allowed = false ++ ++ // Consume the token. ++ start_mark := parser.mark ++ ++ skip(parser) ++ skip(parser) ++ skip(parser) ++ ++ end_mark := parser.mark ++ ++ // Create the DOCUMENT-START or DOCUMENT-END token. ++ token := yaml_token_t{ ++ typ: typ, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ } ++ // Append the token to the queue. ++ yaml_insert_token(parser, -1, &token) ++ return true ++} ++ ++// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. ++func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { ++ ++ // The indicators '[' and '{' may start a simple key. ++ if !yaml_parser_save_simple_key(parser) { ++ return false ++ } ++ ++ // Increase the flow level. ++ if !yaml_parser_increase_flow_level(parser) { ++ return false ++ } ++ ++ // A simple key may follow the indicators '[' and '{'. ++ parser.simple_key_allowed = true ++ ++ // Consume the token. ++ start_mark := parser.mark ++ skip(parser) ++ end_mark := parser.mark ++ ++ // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. ++ token := yaml_token_t{ ++ typ: typ, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ } ++ // Append the token to the queue. ++ yaml_insert_token(parser, -1, &token) ++ return true ++} ++ ++// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. ++func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { ++ // Reset any potential simple key on the current flow level. ++ if !yaml_parser_remove_simple_key(parser) { ++ return false ++ } ++ ++ // Decrease the flow level. ++ if !yaml_parser_decrease_flow_level(parser) { ++ return false ++ } ++ ++ // No simple keys after the indicators ']' and '}'. ++ parser.simple_key_allowed = false ++ ++ // Consume the token. 
++
++	start_mark := parser.mark
++	skip(parser)
++	end_mark := parser.mark
++
++	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
++	token := yaml_token_t{
++		typ:        typ,
++		start_mark: start_mark,
++		end_mark:   end_mark,
++	}
++	// Append the token to the queue.
++	yaml_insert_token(parser, -1, &token)
++	return true
++}
++
++// Produce the FLOW-ENTRY token.
++func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
++	// Reset any potential simple keys on the current flow level.
++	if !yaml_parser_remove_simple_key(parser) {
++		return false
++	}
++
++	// Simple keys are allowed after ','.
++	parser.simple_key_allowed = true
++
++	// Consume the token.
++	start_mark := parser.mark
++	skip(parser)
++	end_mark := parser.mark
++
++	// Create the FLOW-ENTRY token and append it to the queue.
++	token := yaml_token_t{
++		typ:        yaml_FLOW_ENTRY_TOKEN,
++		start_mark: start_mark,
++		end_mark:   end_mark,
++	}
++	yaml_insert_token(parser, -1, &token)
++	return true
++}
++
++// Produce the BLOCK-ENTRY token.
++func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
++	// Check if the scanner is in the block context.
++	if parser.flow_level == 0 {
++		// Check if we are allowed to start a new entry.
++		if !parser.simple_key_allowed {
++			return yaml_parser_set_scanner_error(parser, "", parser.mark,
++				"block sequence entries are not allowed in this context")
++		}
++		// Add the BLOCK-SEQUENCE-START token if needed.
++		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
++			return false
++		}
++	} else {
++		// It is an error for the '-' indicator to occur in the flow context,
++		// but we let the Parser detect and report it because the Parser
++		// is able to point to the context.
++	}
++
++	// Reset any potential simple keys on the current flow level.
++	if !yaml_parser_remove_simple_key(parser) {
++		return false
++	}
++
++	// Simple keys are allowed after '-'.
++	parser.simple_key_allowed = true
++
++	// Consume the token.
++	start_mark := parser.mark
++	skip(parser)
++	end_mark := parser.mark
++
++	// Create the BLOCK-ENTRY token and append it to the queue.
++	token := yaml_token_t{
++		typ:        yaml_BLOCK_ENTRY_TOKEN,
++		start_mark: start_mark,
++		end_mark:   end_mark,
++	}
++	yaml_insert_token(parser, -1, &token)
++	return true
++}
++
++// Produce the KEY token.
++func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
++
++	// In the block context, additional checks are required.
++	if parser.flow_level == 0 {
++		// Check if we are allowed to start a new key (not necessarily simple).
++		if !parser.simple_key_allowed {
++			return yaml_parser_set_scanner_error(parser, "", parser.mark,
++				"mapping keys are not allowed in this context")
++		}
++		// Add the BLOCK-MAPPING-START token if needed.
++		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
++			return false
++		}
++	}
++
++	// Reset any potential simple keys on the current flow level.
++	if !yaml_parser_remove_simple_key(parser) {
++		return false
++	}
++
++	// Simple keys are allowed after '?' in the block context.
++	parser.simple_key_allowed = parser.flow_level == 0
++
++	// Consume the token.
++	start_mark := parser.mark
++	skip(parser)
++	end_mark := parser.mark
++
++	// Create the KEY token and append it to the queue.
++	token := yaml_token_t{
++		typ:        yaml_KEY_TOKEN,
++		start_mark: start_mark,
++		end_mark:   end_mark,
++	}
++	yaml_insert_token(parser, -1, &token)
++	return true
++}
++
++// Produce the VALUE token.
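++//
++// A worked example of the simple-key machinery: while scanning "a: b", the
++// scalar "a" only records a *potential* simple key. It is this function,
++// reached when ':' is seen, that retroactively inserts the KEY token (and,
++// if needed, BLOCK-MAPPING-START) at the queue position remembered in
++// simple_key.token_number.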
++func yaml_parser_fetch_value(parser *yaml_parser_t) bool { ++ ++ simple_key := &parser.simple_keys[len(parser.simple_keys)-1] ++ ++ // Have we found a simple key? ++ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { ++ return false ++ ++ } else if valid { ++ ++ // Create the KEY token and insert it into the queue. ++ token := yaml_token_t{ ++ typ: yaml_KEY_TOKEN, ++ start_mark: simple_key.mark, ++ end_mark: simple_key.mark, ++ } ++ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) ++ ++ // In the block context, we may need to add the BLOCK-MAPPING-START token. ++ if !yaml_parser_roll_indent(parser, simple_key.mark.column, ++ simple_key.token_number, ++ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { ++ return false ++ } ++ ++ // Remove the simple key. ++ simple_key.possible = false ++ delete(parser.simple_keys_by_tok, simple_key.token_number) ++ ++ // A simple key cannot follow another simple key. ++ parser.simple_key_allowed = false ++ ++ } else { ++ // The ':' indicator follows a complex key. ++ ++ // In the block context, extra checks are required. ++ if parser.flow_level == 0 { ++ ++ // Check if we are allowed to start a complex value. ++ if !parser.simple_key_allowed { ++ return yaml_parser_set_scanner_error(parser, "", parser.mark, ++ "mapping values are not allowed in this context") ++ } ++ ++ // Add the BLOCK-MAPPING-START token if needed. ++ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { ++ return false ++ } ++ } ++ ++ // Simple keys after ':' are allowed in the block context. ++ parser.simple_key_allowed = parser.flow_level == 0 ++ } ++ ++ // Consume the token. ++ start_mark := parser.mark ++ skip(parser) ++ end_mark := parser.mark ++ ++ // Create the VALUE token and append it to the queue. ++ token := yaml_token_t{ ++ typ: yaml_VALUE_TOKEN, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ } ++ yaml_insert_token(parser, -1, &token) ++ return true ++} ++ ++// Produce the ALIAS or ANCHOR token. ++func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { ++ // An anchor or an alias could be a simple key. ++ if !yaml_parser_save_simple_key(parser) { ++ return false ++ } ++ ++ // A simple key cannot follow an anchor or an alias. ++ parser.simple_key_allowed = false ++ ++ // Create the ALIAS or ANCHOR token and append it to the queue. ++ var token yaml_token_t ++ if !yaml_parser_scan_anchor(parser, &token, typ) { ++ return false ++ } ++ yaml_insert_token(parser, -1, &token) ++ return true ++} ++ ++// Produce the TAG token. ++func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { ++ // A tag could be a simple key. ++ if !yaml_parser_save_simple_key(parser) { ++ return false ++ } ++ ++ // A simple key cannot follow a tag. ++ parser.simple_key_allowed = false ++ ++ // Create the TAG token and append it to the queue. ++ var token yaml_token_t ++ if !yaml_parser_scan_tag(parser, &token) { ++ return false ++ } ++ yaml_insert_token(parser, -1, &token) ++ return true ++} ++ ++// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. ++func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { ++ // Remove any potential simple keys. ++ if !yaml_parser_remove_simple_key(parser) { ++ return false ++ } ++ ++ // A simple key may follow a block scalar. ++ parser.simple_key_allowed = true ++ ++ // Create the SCALAR token and append it to the queue. 
++	var token yaml_token_t
++	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
++		return false
++	}
++	yaml_insert_token(parser, -1, &token)
++	return true
++}
++
++// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
++func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
++	// A plain scalar could be a simple key.
++	if !yaml_parser_save_simple_key(parser) {
++		return false
++	}
++
++	// A simple key cannot follow a flow scalar.
++	parser.simple_key_allowed = false
++
++	// Create the SCALAR token and append it to the queue.
++	var token yaml_token_t
++	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
++		return false
++	}
++	yaml_insert_token(parser, -1, &token)
++	return true
++}
++
++// Produce the SCALAR(...,plain) token.
++func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
++	// A plain scalar could be a simple key.
++	if !yaml_parser_save_simple_key(parser) {
++		return false
++	}
++
++	// A simple key cannot follow a flow scalar.
++	parser.simple_key_allowed = false
++
++	// Create the SCALAR token and append it to the queue.
++	var token yaml_token_t
++	if !yaml_parser_scan_plain_scalar(parser, &token) {
++		return false
++	}
++	yaml_insert_token(parser, -1, &token)
++	return true
++}
++
++// Eat whitespaces and comments until the next token is found.
++func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
++
++	scan_mark := parser.mark
++
++	// Loop until the next token is found.
++	for {
++		// Allow the BOM mark to start a line.
++		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++			return false
++		}
++		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
++			skip(parser)
++		}
++
++		// Eat whitespaces.
++		// Tabs are allowed:
++		//  - in the flow context
++		//  - in the block context, but not at the beginning of the line or
++		//    after '-', '?', or ':' (complex value).
++		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++			return false
++		}
++
++		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
++			skip(parser)
++			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++				return false
++			}
++		}
++
++		// Check if we just had a line comment under a sequence entry that
++		// looks more like a header to the following content. Similar to this:
++		//
++		// - # The comment
++		//   - Some data
++		//
++		// If so, transform the line comment to a head comment and reposition.
++		if len(parser.comments) > 0 && len(parser.tokens) > 1 {
++			tokenA := parser.tokens[len(parser.tokens)-2]
++			tokenB := parser.tokens[len(parser.tokens)-1]
++			comment := &parser.comments[len(parser.comments)-1]
++			if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
++				// If it was in the prior line, reposition so it becomes a
++				// header of the follow-up token. Otherwise, keep it in place
++				// so it becomes a header of the former.
++				comment.head = comment.line
++				comment.line = nil
++				if comment.start_mark.line == parser.mark.line-1 {
++					comment.token_mark = parser.mark
++				}
++			}
++		}
++
++		// Eat a comment until a line break.
++		if parser.buffer[parser.buffer_pos] == '#' {
++			if !yaml_parser_scan_comments(parser, scan_mark) {
++				return false
++			}
++		}
++
++		// If it is a line break, eat it.
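++		// (Two buffered bytes are required so that a CR LF pair can be
++		// consumed as a single break by skip_line.)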
++ if is_break(parser.buffer, parser.buffer_pos) { ++ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { ++ return false ++ } ++ skip_line(parser) ++ ++ // In the block context, a new line may start a simple key. ++ if parser.flow_level == 0 { ++ parser.simple_key_allowed = true ++ } ++ } else { ++ break // We have found a token. ++ } ++ } ++ ++ return true ++} ++ ++// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. ++// ++// Scope: ++// %YAML 1.1 # a comment \n ++// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ++// %TAG !yaml! tag:yaml.org,2002: \n ++// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ++// ++func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { ++ // Eat '%'. ++ start_mark := parser.mark ++ skip(parser) ++ ++ // Scan the directive name. ++ var name []byte ++ if !yaml_parser_scan_directive_name(parser, start_mark, &name) { ++ return false ++ } ++ ++ // Is it a YAML directive? ++ if bytes.Equal(name, []byte("YAML")) { ++ // Scan the VERSION directive value. ++ var major, minor int8 ++ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { ++ return false ++ } ++ end_mark := parser.mark ++ ++ // Create a VERSION-DIRECTIVE token. ++ *token = yaml_token_t{ ++ typ: yaml_VERSION_DIRECTIVE_TOKEN, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ major: major, ++ minor: minor, ++ } ++ ++ // Is it a TAG directive? ++ } else if bytes.Equal(name, []byte("TAG")) { ++ // Scan the TAG directive value. ++ var handle, prefix []byte ++ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { ++ return false ++ } ++ end_mark := parser.mark ++ ++ // Create a TAG-DIRECTIVE token. ++ *token = yaml_token_t{ ++ typ: yaml_TAG_DIRECTIVE_TOKEN, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ value: handle, ++ prefix: prefix, ++ } ++ ++ // Unknown directive. ++ } else { ++ yaml_parser_set_scanner_error(parser, "while scanning a directive", ++ start_mark, "found unknown directive name") ++ return false ++ } ++ ++ // Eat the rest of the line including any comments. ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ ++ for is_blank(parser.buffer, parser.buffer_pos) { ++ skip(parser) ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ } ++ ++ if parser.buffer[parser.buffer_pos] == '#' { ++ // [Go] Discard this inline comment for the time being. ++ //if !yaml_parser_scan_line_comment(parser, start_mark) { ++ // return false ++ //} ++ for !is_breakz(parser.buffer, parser.buffer_pos) { ++ skip(parser) ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ } ++ } ++ ++ // Check if we are at the end of the line. ++ if !is_breakz(parser.buffer, parser.buffer_pos) { ++ yaml_parser_set_scanner_error(parser, "while scanning a directive", ++ start_mark, "did not find expected comment or line break") ++ return false ++ } ++ ++ // Eat a line break. ++ if is_break(parser.buffer, parser.buffer_pos) { ++ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { ++ return false ++ } ++ skip_line(parser) ++ } ++ ++ return true ++} ++ ++// Scan the directive name. ++// ++// Scope: ++// %YAML 1.1 # a comment \n ++// ^^^^ ++// %TAG !yaml! tag:yaml.org,2002: \n ++// ^^^ ++// ++func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { ++ // Consume the directive name. 
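++	// (is_alpha accepts ASCII letters, digits, '_' and '-', so "YAML" and
++	// "TAG" both scan as directive names.)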
++	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++		return false
++	}
++
++	var s []byte
++	for is_alpha(parser.buffer, parser.buffer_pos) {
++		s = read(parser, s)
++		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++			return false
++		}
++	}
++
++	// Check if the name is empty.
++	if len(s) == 0 {
++		yaml_parser_set_scanner_error(parser, "while scanning a directive",
++			start_mark, "could not find expected directive name")
++		return false
++	}
++
++	// Check for a blank character after the name.
++	if !is_blankz(parser.buffer, parser.buffer_pos) {
++		yaml_parser_set_scanner_error(parser, "while scanning a directive",
++			start_mark, "found unexpected non-alphabetical character")
++		return false
++	}
++	*name = s
++	return true
++}
++
++// Scan the value of VERSION-DIRECTIVE.
++//
++// Scope:
++// %YAML 1.1 # a comment \n
++//       ^^^^^^
++func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
++	// Eat whitespaces.
++	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++		return false
++	}
++	for is_blank(parser.buffer, parser.buffer_pos) {
++		skip(parser)
++		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++			return false
++		}
++	}
++
++	// Consume the major version number.
++	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
++		return false
++	}
++
++	// Eat '.'.
++	if parser.buffer[parser.buffer_pos] != '.' {
++		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
++			start_mark, "did not find expected digit or '.' character")
++	}
++
++	skip(parser)
++
++	// Consume the minor version number.
++	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
++		return false
++	}
++	return true
++}
++
++const max_number_length = 2
++
++// Scan the version number of VERSION-DIRECTIVE.
++//
++// Scope:
++// %YAML 1.1 # a comment \n
++//       ^
++// %YAML 1.1 # a comment \n
++//         ^
++func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
++
++	// Repeat while the next character is a digit.
++	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++		return false
++	}
++	var value, length int8
++	for is_digit(parser.buffer, parser.buffer_pos) {
++		// Check if the number is too long.
++		length++
++		if length > max_number_length {
++			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
++				start_mark, "found extremely long version number")
++		}
++		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
++		skip(parser)
++		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++			return false
++		}
++	}
++
++	// Check if the number was present.
++	if length == 0 {
++		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
++			start_mark, "did not find expected version number")
++	}
++	*number = value
++	return true
++}
++
++// Scan the value of a TAG-DIRECTIVE token.
++//
++// Scope:
++// %TAG !yaml! tag:yaml.org,2002: \n
++//      ^^^^^^^^^^^^^^^^^^^^^^^^^
++//
++func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
++	var handle_value, prefix_value []byte
++
++	// Eat whitespaces.
++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ ++ for is_blank(parser.buffer, parser.buffer_pos) { ++ skip(parser) ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ } ++ ++ // Scan a handle. ++ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { ++ return false ++ } ++ ++ // Expect a whitespace. ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ if !is_blank(parser.buffer, parser.buffer_pos) { ++ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", ++ start_mark, "did not find expected whitespace") ++ return false ++ } ++ ++ // Eat whitespaces. ++ for is_blank(parser.buffer, parser.buffer_pos) { ++ skip(parser) ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ } ++ ++ // Scan a prefix. ++ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { ++ return false ++ } ++ ++ // Expect a whitespace or line break. ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ if !is_blankz(parser.buffer, parser.buffer_pos) { ++ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", ++ start_mark, "did not find expected whitespace or line break") ++ return false ++ } ++ ++ *handle = handle_value ++ *prefix = prefix_value ++ return true ++} ++ ++func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { ++ var s []byte ++ ++ // Eat the indicator character. ++ start_mark := parser.mark ++ skip(parser) ++ ++ // Consume the value. ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ ++ for is_alpha(parser.buffer, parser.buffer_pos) { ++ s = read(parser, s) ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ } ++ ++ end_mark := parser.mark ++ ++ /* ++ * Check if length of the anchor is greater than 0 and it is followed by ++ * a whitespace character or one of the indicators: ++ * ++ * '?', ':', ',', ']', '}', '%', '@', '`'. ++ */ ++ ++ if len(s) == 0 || ++ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || ++ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || ++ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || ++ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || ++ parser.buffer[parser.buffer_pos] == '`') { ++ context := "while scanning an alias" ++ if typ == yaml_ANCHOR_TOKEN { ++ context = "while scanning an anchor" ++ } ++ yaml_parser_set_scanner_error(parser, context, start_mark, ++ "did not find expected alphabetic or numeric character") ++ return false ++ } ++ ++ // Create a token. ++ *token = yaml_token_t{ ++ typ: typ, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ value: s, ++ } ++ ++ return true ++} ++ ++/* ++ * Scan a TAG token. ++ */ ++ ++func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { ++ var handle, suffix []byte ++ ++ start_mark := parser.mark ++ ++ // Check if the tag is in the canonical form. ++ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { ++ return false ++ } ++ ++ if parser.buffer[parser.buffer_pos+1] == '<' { ++ // Keep the handle as '' ++ ++ // Eat '!<' ++ skip(parser) ++ skip(parser) ++ ++ // Consume the tag value. 
++		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
++			return false
++		}
++
++		// Check for '>' and eat it.
++		if parser.buffer[parser.buffer_pos] != '>' {
++			yaml_parser_set_scanner_error(parser, "while scanning a tag",
++				start_mark, "did not find the expected '>'")
++			return false
++		}
++
++		skip(parser)
++	} else {
++		// The tag has either the '!suffix' or the '!handle!suffix' form.
++
++		// First, try to scan a handle.
++		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
++			return false
++		}
++
++		// Check if it is, indeed, a handle.
++		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
++			// Scan the suffix now.
++			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
++				return false
++			}
++		} else {
++			// It wasn't a handle after all. Scan the rest of the tag.
++			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
++				return false
++			}
++
++			// Set the handle to '!'.
++			handle = []byte{'!'}
++
++			// A special case: the '!' tag. Set the handle to '' and the
++			// suffix to '!'.
++			if len(suffix) == 0 {
++				handle, suffix = suffix, handle
++			}
++		}
++	}
++
++	// Check the character which ends the tag.
++	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++		return false
++	}
++	if !is_blankz(parser.buffer, parser.buffer_pos) {
++		yaml_parser_set_scanner_error(parser, "while scanning a tag",
++			start_mark, "did not find expected whitespace or line break")
++		return false
++	}
++
++	end_mark := parser.mark
++
++	// Create a token.
++	*token = yaml_token_t{
++		typ:        yaml_TAG_TOKEN,
++		start_mark: start_mark,
++		end_mark:   end_mark,
++		value:      handle,
++		suffix:     suffix,
++	}
++	return true
++}
++
++// Scan a tag handle.
++func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
++	// Check the initial '!' character.
++	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++		return false
++	}
++	if parser.buffer[parser.buffer_pos] != '!' {
++		yaml_parser_set_scanner_tag_error(parser, directive,
++			start_mark, "did not find expected '!'")
++		return false
++	}
++
++	var s []byte
++
++	// Copy the '!' character.
++	s = read(parser, s)
++
++	// Copy all subsequent alphabetical and numerical characters.
++	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++		return false
++	}
++	for is_alpha(parser.buffer, parser.buffer_pos) {
++		s = read(parser, s)
++		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++			return false
++		}
++	}
++
++	// Check if the trailing character is '!' and copy it.
++	if parser.buffer[parser.buffer_pos] == '!' {
++		s = read(parser, s)
++	} else {
++		// It's either the '!' tag or not really a tag handle. If it's a %TAG
++		// directive, it's an error. If it's a tag token, it must be part of a URI.
++		if directive && string(s) != "!" {
++			yaml_parser_set_scanner_tag_error(parser, directive,
++				start_mark, "did not find expected '!'")
++			return false
++		}
++	}
++
++	*handle = s
++	return true
++}
++
++// Scan a tag URI.
++func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
++	//size_t length = head ? strlen((char *)head) : 0
++	var s []byte
++	hasTag := len(head) > 0
++
++	// Copy the head if needed.
++	//
++	// Note that we don't copy the leading '!' character.
++	if len(head) > 1 {
++		s = append(s, head[1:]...)
++	}
++
++	// Scan the tag.
++	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++		return false
++	}
++
++	// The set of characters that may appear in URI is as follows:
++	//
++	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
++	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
++	//      '%'.
++	// [Go] TODO Convert this into more reasonable logic.
++	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
++		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
++		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
++		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
++		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
++		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
++		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
++		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
++		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
++		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
++		parser.buffer[parser.buffer_pos] == '%' {
++		// Check if it is a URI-escape sequence.
++		if parser.buffer[parser.buffer_pos] == '%' {
++			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
++				return false
++			}
++		} else {
++			s = read(parser, s)
++		}
++		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
++			return false
++		}
++		hasTag = true
++	}
++
++	if !hasTag {
++		yaml_parser_set_scanner_tag_error(parser, directive,
++			start_mark, "did not find expected tag URI")
++		return false
++	}
++	*uri = s
++	return true
++}
++
++// Decode a URI-escape sequence corresponding to a single UTF-8 character.
++func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
++
++	// Decode the required number of characters.
++	w := 1024 // Sentinel: the sequence width is unknown until the leading octet is seen.
++	for w > 0 {
++		// Check for a URI-escaped octet.
++		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
++			return false
++		}
++
++		if !(parser.buffer[parser.buffer_pos] == '%' &&
++			is_hex(parser.buffer, parser.buffer_pos+1) &&
++			is_hex(parser.buffer, parser.buffer_pos+2)) {
++			return yaml_parser_set_scanner_tag_error(parser, directive,
++				start_mark, "did not find URI escaped octet")
++		}
++
++		// Get the octet.
++		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
++
++		// If it is the leading octet, determine the length of the UTF-8 sequence.
++		if w == 1024 {
++			w = width(octet)
++			if w == 0 {
++				return yaml_parser_set_scanner_tag_error(parser, directive,
++					start_mark, "found an incorrect leading UTF-8 octet")
++			}
++		} else {
++			// Check if the trailing octet is correct.
++			if octet&0xC0 != 0x80 {
++				return yaml_parser_set_scanner_tag_error(parser, directive,
++					start_mark, "found an incorrect trailing UTF-8 octet")
++			}
++		}
++
++		// Copy the octet and move the pointers.
++		*s = append(*s, octet)
++		skip(parser)
++		skip(parser)
++		skip(parser)
++		w--
++	}
++	return true
++}
++
++// Scan a block scalar.
++func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
++	// Eat the indicator '|' or '>'.
++	start_mark := parser.mark
++	skip(parser)
++
++	// Scan the additional block scalar indicators.
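++	// (A block scalar header may carry a chomping indicator ('+' keep,
++	// '-' strip, none means clip) and an explicit indentation digit 1-9,
++	// in either order, e.g. "|2-" or ">+1".)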
++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ ++ // Check for a chomping indicator. ++ var chomping, increment int ++ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { ++ // Set the chomping method and eat the indicator. ++ if parser.buffer[parser.buffer_pos] == '+' { ++ chomping = +1 ++ } else { ++ chomping = -1 ++ } ++ skip(parser) ++ ++ // Check for an indentation indicator. ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ if is_digit(parser.buffer, parser.buffer_pos) { ++ // Check that the indentation is greater than 0. ++ if parser.buffer[parser.buffer_pos] == '0' { ++ yaml_parser_set_scanner_error(parser, "while scanning a block scalar", ++ start_mark, "found an indentation indicator equal to 0") ++ return false ++ } ++ ++ // Get the indentation level and eat the indicator. ++ increment = as_digit(parser.buffer, parser.buffer_pos) ++ skip(parser) ++ } ++ ++ } else if is_digit(parser.buffer, parser.buffer_pos) { ++ // Do the same as above, but in the opposite order. ++ ++ if parser.buffer[parser.buffer_pos] == '0' { ++ yaml_parser_set_scanner_error(parser, "while scanning a block scalar", ++ start_mark, "found an indentation indicator equal to 0") ++ return false ++ } ++ increment = as_digit(parser.buffer, parser.buffer_pos) ++ skip(parser) ++ ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { ++ if parser.buffer[parser.buffer_pos] == '+' { ++ chomping = +1 ++ } else { ++ chomping = -1 ++ } ++ skip(parser) ++ } ++ } ++ ++ // Eat whitespaces and comments to the end of the line. ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ for is_blank(parser.buffer, parser.buffer_pos) { ++ skip(parser) ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ } ++ if parser.buffer[parser.buffer_pos] == '#' { ++ if !yaml_parser_scan_line_comment(parser, start_mark) { ++ return false ++ } ++ for !is_breakz(parser.buffer, parser.buffer_pos) { ++ skip(parser) ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ } ++ } ++ ++ // Check if we are at the end of the line. ++ if !is_breakz(parser.buffer, parser.buffer_pos) { ++ yaml_parser_set_scanner_error(parser, "while scanning a block scalar", ++ start_mark, "did not find expected comment or line break") ++ return false ++ } ++ ++ // Eat a line break. ++ if is_break(parser.buffer, parser.buffer_pos) { ++ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { ++ return false ++ } ++ skip_line(parser) ++ } ++ ++ end_mark := parser.mark ++ ++ // Set the indentation level if it was specified. ++ var indent int ++ if increment > 0 { ++ if parser.indent >= 0 { ++ indent = parser.indent + increment ++ } else { ++ indent = increment ++ } ++ } ++ ++ // Scan the leading line breaks and determine the indentation level if needed. ++ var s, leading_break, trailing_breaks []byte ++ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { ++ return false ++ } ++ ++ // Scan the block scalar content. ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ var leading_blank, trailing_blank bool ++ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { ++ // We are at the beginning of a non-empty line. 
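++		// (For folded '>' scalars, a single line break between two regular
++		// lines is replaced by a space below; breaks next to more-indented,
++		// blank-led lines are preserved literally.)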
++ ++ // Is it a trailing whitespace? ++ trailing_blank = is_blank(parser.buffer, parser.buffer_pos) ++ ++ // Check if we need to fold the leading line break. ++ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { ++ // Do we need to join the lines by space? ++ if len(trailing_breaks) == 0 { ++ s = append(s, ' ') ++ } ++ } else { ++ s = append(s, leading_break...) ++ } ++ leading_break = leading_break[:0] ++ ++ // Append the remaining line breaks. ++ s = append(s, trailing_breaks...) ++ trailing_breaks = trailing_breaks[:0] ++ ++ // Is it a leading whitespace? ++ leading_blank = is_blank(parser.buffer, parser.buffer_pos) ++ ++ // Consume the current line. ++ for !is_breakz(parser.buffer, parser.buffer_pos) { ++ s = read(parser, s) ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ } ++ ++ // Consume the line break. ++ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { ++ return false ++ } ++ ++ leading_break = read_line(parser, leading_break) ++ ++ // Eat the following indentation spaces and line breaks. ++ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { ++ return false ++ } ++ } ++ ++ // Chomp the tail. ++ if chomping != -1 { ++ s = append(s, leading_break...) ++ } ++ if chomping == 1 { ++ s = append(s, trailing_breaks...) ++ } ++ ++ // Create a token. ++ *token = yaml_token_t{ ++ typ: yaml_SCALAR_TOKEN, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ value: s, ++ style: yaml_LITERAL_SCALAR_STYLE, ++ } ++ if !literal { ++ token.style = yaml_FOLDED_SCALAR_STYLE ++ } ++ return true ++} ++ ++// Scan indentation spaces and line breaks for a block scalar. Determine the ++// indentation level if needed. ++func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { ++ *end_mark = parser.mark ++ ++ // Eat the indentation spaces and line breaks. ++ max_indent := 0 ++ for { ++ // Eat the indentation spaces. ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { ++ skip(parser) ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ } ++ if parser.mark.column > max_indent { ++ max_indent = parser.mark.column ++ } ++ ++ // Check for a tab character messing the indentation. ++ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { ++ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", ++ start_mark, "found a tab character where an indentation space is expected") ++ } ++ ++ // Have we found a non-empty line? ++ if !is_break(parser.buffer, parser.buffer_pos) { ++ break ++ } ++ ++ // Consume the line break. ++ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { ++ return false ++ } ++ // [Go] Should really be returning breaks instead. ++ *breaks = read_line(parser, *breaks) ++ *end_mark = parser.mark ++ } ++ ++ // Determine the indentation level if needed. ++ if *indent == 0 { ++ *indent = max_indent ++ if *indent < parser.indent+1 { ++ *indent = parser.indent + 1 ++ } ++ if *indent < 1 { ++ *indent = 1 ++ } ++ } ++ return true ++} ++ ++// Scan a quoted scalar. ++func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { ++ // Eat the left quote. 
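++	// (single selects single-quoted scanning, where the only escape is the
++	// doubled quote ''; backslash escapes apply to double-quoted scalars only.)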
++	start_mark := parser.mark
++	skip(parser)
++
++	// Consume the content of the quoted scalar.
++	var s, leading_break, trailing_breaks, whitespaces []byte
++	for {
++		// Check that there are no document indicators at the beginning of the line.
++		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
++			return false
++		}
++
++		if parser.mark.column == 0 &&
++			((parser.buffer[parser.buffer_pos+0] == '-' &&
++				parser.buffer[parser.buffer_pos+1] == '-' &&
++				parser.buffer[parser.buffer_pos+2] == '-') ||
++				(parser.buffer[parser.buffer_pos+0] == '.' &&
++					parser.buffer[parser.buffer_pos+1] == '.' &&
++					parser.buffer[parser.buffer_pos+2] == '.')) &&
++			is_blankz(parser.buffer, parser.buffer_pos+3) {
++			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
++				start_mark, "found unexpected document indicator")
++			return false
++		}
++
++		// Check for EOF.
++		if is_z(parser.buffer, parser.buffer_pos) {
++			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
++				start_mark, "found unexpected end of stream")
++			return false
++		}
++
++		// Consume non-blank characters.
++		leading_blanks := false
++		for !is_blankz(parser.buffer, parser.buffer_pos) {
++			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
++				// It is an escaped single quote.
++				s = append(s, '\'')
++				skip(parser)
++				skip(parser)
++
++			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
++				// It is a right single quote.
++				break
++			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
++				// It is a right double quote.
++				break
++
++			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
++				// It is an escaped line break.
++				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
++					return false
++				}
++				skip(parser)
++				skip_line(parser)
++				leading_blanks = true
++				break
++
++			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
++				// It is an escape sequence.
++				code_length := 0
++
++				// Check the escape character.
++				switch parser.buffer[parser.buffer_pos+1] {
++				case '0':
++					s = append(s, 0)
++				case 'a':
++					s = append(s, '\x07')
++				case 'b':
++					s = append(s, '\x08')
++				case 't', '\t':
++					s = append(s, '\x09')
++				case 'n':
++					s = append(s, '\x0A')
++				case 'v':
++					s = append(s, '\x0B')
++				case 'f':
++					s = append(s, '\x0C')
++				case 'r':
++					s = append(s, '\x0D')
++				case 'e':
++					s = append(s, '\x1B')
++				case ' ':
++					s = append(s, '\x20')
++				case '"':
++					s = append(s, '"')
++				case '\'':
++					s = append(s, '\'')
++				case '\\':
++					s = append(s, '\\')
++				case 'N': // NEL (#x85)
++					s = append(s, '\xC2')
++					s = append(s, '\x85')
++				case '_': // #xA0
++					s = append(s, '\xC2')
++					s = append(s, '\xA0')
++				case 'L': // LS (#x2028)
++					s = append(s, '\xE2')
++					s = append(s, '\x80')
++					s = append(s, '\xA8')
++				case 'P': // PS (#x2029)
++					s = append(s, '\xE2')
++					s = append(s, '\x80')
++					s = append(s, '\xA9')
++				case 'x':
++					code_length = 2
++				case 'u':
++					code_length = 4
++				case 'U':
++					code_length = 8
++				default:
++					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
++						start_mark, "found unknown escape character")
++					return false
++				}
++
++				skip(parser)
++				skip(parser)
++
++				// Consume an arbitrary escape code.
++				if code_length > 0 {
++					var value int
++
++					// Scan the character value.
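++					// (code_length is 2 for \x, 4 for \u and 8 for \U; the
++					// decoded code point is range-checked and re-encoded as
++					// UTF-8 below.)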
++ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { ++ return false ++ } ++ for k := 0; k < code_length; k++ { ++ if !is_hex(parser.buffer, parser.buffer_pos+k) { ++ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", ++ start_mark, "did not find expected hexdecimal number") ++ return false ++ } ++ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) ++ } ++ ++ // Check the value and write the character. ++ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { ++ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", ++ start_mark, "found invalid Unicode character escape code") ++ return false ++ } ++ if value <= 0x7F { ++ s = append(s, byte(value)) ++ } else if value <= 0x7FF { ++ s = append(s, byte(0xC0+(value>>6))) ++ s = append(s, byte(0x80+(value&0x3F))) ++ } else if value <= 0xFFFF { ++ s = append(s, byte(0xE0+(value>>12))) ++ s = append(s, byte(0x80+((value>>6)&0x3F))) ++ s = append(s, byte(0x80+(value&0x3F))) ++ } else { ++ s = append(s, byte(0xF0+(value>>18))) ++ s = append(s, byte(0x80+((value>>12)&0x3F))) ++ s = append(s, byte(0x80+((value>>6)&0x3F))) ++ s = append(s, byte(0x80+(value&0x3F))) ++ } ++ ++ // Advance the pointer. ++ for k := 0; k < code_length; k++ { ++ skip(parser) ++ } ++ } ++ } else { ++ // It is a non-escaped non-blank character. ++ s = read(parser, s) ++ } ++ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { ++ return false ++ } ++ } ++ ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ ++ // Check if we are at the end of the scalar. ++ if single { ++ if parser.buffer[parser.buffer_pos] == '\'' { ++ break ++ } ++ } else { ++ if parser.buffer[parser.buffer_pos] == '"' { ++ break ++ } ++ } ++ ++ // Consume blank characters. ++ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { ++ if is_blank(parser.buffer, parser.buffer_pos) { ++ // Consume a space or a tab character. ++ if !leading_blanks { ++ whitespaces = read(parser, whitespaces) ++ } else { ++ skip(parser) ++ } ++ } else { ++ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { ++ return false ++ } ++ ++ // Check if it is a first line break. ++ if !leading_blanks { ++ whitespaces = whitespaces[:0] ++ leading_break = read_line(parser, leading_break) ++ leading_blanks = true ++ } else { ++ trailing_breaks = read_line(parser, trailing_breaks) ++ } ++ } ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ } ++ ++ // Join the whitespaces or fold line breaks. ++ if leading_blanks { ++ // Do we need to fold line breaks? ++ if len(leading_break) > 0 && leading_break[0] == '\n' { ++ if len(trailing_breaks) == 0 { ++ s = append(s, ' ') ++ } else { ++ s = append(s, trailing_breaks...) ++ } ++ } else { ++ s = append(s, leading_break...) ++ s = append(s, trailing_breaks...) ++ } ++ trailing_breaks = trailing_breaks[:0] ++ leading_break = leading_break[:0] ++ } else { ++ s = append(s, whitespaces...) ++ whitespaces = whitespaces[:0] ++ } ++ } ++ ++ // Eat the right quote. ++ skip(parser) ++ end_mark := parser.mark ++ ++ // Create a token. ++ *token = yaml_token_t{ ++ typ: yaml_SCALAR_TOKEN, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ value: s, ++ style: yaml_SINGLE_QUOTED_SCALAR_STYLE, ++ } ++ if !single { ++ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE ++ } ++ return true ++} ++ ++// Scan a plain scalar. 
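++//
++// A plain (unquoted) scalar ends at ':' followed by whitespace, at a '#'
++// comment preceded by whitespace, at a flow indicator (',', '?', '[', ']',
++// '{', '}') inside flow context, at a document indicator in column 0, or
++// when indentation drops below the enclosing block level.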
++func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { ++ ++ var s, leading_break, trailing_breaks, whitespaces []byte ++ var leading_blanks bool ++ var indent = parser.indent + 1 ++ ++ start_mark := parser.mark ++ end_mark := parser.mark ++ ++ // Consume the content of the plain scalar. ++ for { ++ // Check for a document indicator. ++ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { ++ return false ++ } ++ if parser.mark.column == 0 && ++ ((parser.buffer[parser.buffer_pos+0] == '-' && ++ parser.buffer[parser.buffer_pos+1] == '-' && ++ parser.buffer[parser.buffer_pos+2] == '-') || ++ (parser.buffer[parser.buffer_pos+0] == '.' && ++ parser.buffer[parser.buffer_pos+1] == '.' && ++ parser.buffer[parser.buffer_pos+2] == '.')) && ++ is_blankz(parser.buffer, parser.buffer_pos+3) { ++ break ++ } ++ ++ // Check for a comment. ++ if parser.buffer[parser.buffer_pos] == '#' { ++ break ++ } ++ ++ // Consume non-blank characters. ++ for !is_blankz(parser.buffer, parser.buffer_pos) { ++ ++ // Check for indicators that may end a plain scalar. ++ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || ++ (parser.flow_level > 0 && ++ (parser.buffer[parser.buffer_pos] == ',' || ++ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || ++ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || ++ parser.buffer[parser.buffer_pos] == '}')) { ++ break ++ } ++ ++ // Check if we need to join whitespaces and breaks. ++ if leading_blanks || len(whitespaces) > 0 { ++ if leading_blanks { ++ // Do we need to fold line breaks? ++ if leading_break[0] == '\n' { ++ if len(trailing_breaks) == 0 { ++ s = append(s, ' ') ++ } else { ++ s = append(s, trailing_breaks...) ++ } ++ } else { ++ s = append(s, leading_break...) ++ s = append(s, trailing_breaks...) ++ } ++ trailing_breaks = trailing_breaks[:0] ++ leading_break = leading_break[:0] ++ leading_blanks = false ++ } else { ++ s = append(s, whitespaces...) ++ whitespaces = whitespaces[:0] ++ } ++ } ++ ++ // Copy the character. ++ s = read(parser, s) ++ ++ end_mark = parser.mark ++ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { ++ return false ++ } ++ } ++ ++ // Is it the end? ++ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { ++ break ++ } ++ ++ // Consume blank characters. ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ ++ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { ++ if is_blank(parser.buffer, parser.buffer_pos) { ++ ++ // Check for tab characters that abuse indentation. ++ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { ++ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", ++ start_mark, "found a tab character that violates indentation") ++ return false ++ } ++ ++ // Consume a space or a tab character. ++ if !leading_blanks { ++ whitespaces = read(parser, whitespaces) ++ } else { ++ skip(parser) ++ } ++ } else { ++ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { ++ return false ++ } ++ ++ // Check if it is a first line break. 
++ if !leading_blanks { ++ whitespaces = whitespaces[:0] ++ leading_break = read_line(parser, leading_break) ++ leading_blanks = true ++ } else { ++ trailing_breaks = read_line(parser, trailing_breaks) ++ } ++ } ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ } ++ ++ // Check indentation level. ++ if parser.flow_level == 0 && parser.mark.column < indent { ++ break ++ } ++ } ++ ++ // Create a token. ++ *token = yaml_token_t{ ++ typ: yaml_SCALAR_TOKEN, ++ start_mark: start_mark, ++ end_mark: end_mark, ++ value: s, ++ style: yaml_PLAIN_SCALAR_STYLE, ++ } ++ ++ // Note that we change the 'simple_key_allowed' flag. ++ if leading_blanks { ++ parser.simple_key_allowed = true ++ } ++ return true ++} ++ ++func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { ++ if parser.newlines > 0 { ++ return true ++ } ++ ++ var start_mark yaml_mark_t ++ var text []byte ++ ++ for peek := 0; peek < 512; peek++ { ++ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { ++ break ++ } ++ if is_blank(parser.buffer, parser.buffer_pos+peek) { ++ continue ++ } ++ if parser.buffer[parser.buffer_pos+peek] == '#' { ++ seen := parser.mark.index+peek ++ for { ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ if is_breakz(parser.buffer, parser.buffer_pos) { ++ if parser.mark.index >= seen { ++ break ++ } ++ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { ++ return false ++ } ++ skip_line(parser) ++ } else if parser.mark.index >= seen { ++ if len(text) == 0 { ++ start_mark = parser.mark ++ } ++ text = read(parser, text) ++ } else { ++ skip(parser) ++ } ++ } ++ } ++ break ++ } ++ if len(text) > 0 { ++ parser.comments = append(parser.comments, yaml_comment_t{ ++ token_mark: token_mark, ++ start_mark: start_mark, ++ line: text, ++ }) ++ } ++ return true ++} ++ ++func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { ++ token := parser.tokens[len(parser.tokens)-1] ++ ++ if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { ++ token = parser.tokens[len(parser.tokens)-2] ++ } ++ ++ var token_mark = token.start_mark ++ var start_mark yaml_mark_t ++ var next_indent = parser.indent ++ if next_indent < 0 { ++ next_indent = 0 ++ } ++ ++ var recent_empty = false ++ var first_empty = parser.newlines <= 1 ++ ++ var line = parser.mark.line ++ var column = parser.mark.column ++ ++ var text []byte ++ ++ // The foot line is the place where a comment must start to ++ // still be considered as a foot of the prior content. ++ // If there's some content in the currently parsed line, then ++ // the foot is the line below it. ++ var foot_line = -1 ++ if scan_mark.line > 0 { ++ foot_line = parser.mark.line-parser.newlines+1 ++ if parser.newlines == 0 && parser.mark.column > 1 { ++ foot_line++ ++ } ++ } ++ ++ var peek = 0 ++ for ; peek < 512; peek++ { ++ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { ++ break ++ } ++ column++ ++ if is_blank(parser.buffer, parser.buffer_pos+peek) { ++ continue ++ } ++ c := parser.buffer[parser.buffer_pos+peek] ++ var close_flow = parser.flow_level > 0 && (c == ']' || c == '}') ++ if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) { ++ // Got line break or terminator. 
++ if close_flow || !recent_empty { ++ if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) { ++ // This is the first empty line and there were no empty lines before, ++ // so this initial part of the comment is a foot of the prior token ++ // instead of being a head for the following one. Split it up. ++ // Alternatively, this might also be the last comment inside a flow ++ // scope, so it must be a footer. ++ if len(text) > 0 { ++ if start_mark.column-1 < next_indent { ++ // If dedented it's unrelated to the prior token. ++ token_mark = start_mark ++ } ++ parser.comments = append(parser.comments, yaml_comment_t{ ++ scan_mark: scan_mark, ++ token_mark: token_mark, ++ start_mark: start_mark, ++ end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, ++ foot: text, ++ }) ++ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} ++ token_mark = scan_mark ++ text = nil ++ } ++ } else { ++ if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { ++ text = append(text, '\n') ++ } ++ } ++ } ++ if !is_break(parser.buffer, parser.buffer_pos+peek) { ++ break ++ } ++ first_empty = false ++ recent_empty = true ++ column = 0 ++ line++ ++ continue ++ } ++ ++ if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) { ++ // The comment at the different indentation is a foot of the ++ // preceding data rather than a head of the upcoming one. ++ parser.comments = append(parser.comments, yaml_comment_t{ ++ scan_mark: scan_mark, ++ token_mark: token_mark, ++ start_mark: start_mark, ++ end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, ++ foot: text, ++ }) ++ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} ++ token_mark = scan_mark ++ text = nil ++ } ++ ++ if parser.buffer[parser.buffer_pos+peek] != '#' { ++ break ++ } ++ ++ if len(text) == 0 { ++ start_mark = yaml_mark_t{parser.mark.index + peek, line, column} ++ } else { ++ text = append(text, '\n') ++ } ++ ++ recent_empty = false ++ ++ // Consume until after the consumed comment line. ++ seen := parser.mark.index+peek ++ for { ++ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { ++ return false ++ } ++ if is_breakz(parser.buffer, parser.buffer_pos) { ++ if parser.mark.index >= seen { ++ break ++ } ++ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { ++ return false ++ } ++ skip_line(parser) ++ } else if parser.mark.index >= seen { ++ text = read(parser, text) ++ } else { ++ skip(parser) ++ } ++ } ++ ++ peek = 0 ++ column = 0 ++ line = parser.mark.line ++ next_indent = parser.indent ++ if next_indent < 0 { ++ next_indent = 0 ++ } ++ } ++ ++ if len(text) > 0 { ++ parser.comments = append(parser.comments, yaml_comment_t{ ++ scan_mark: scan_mark, ++ token_mark: start_mark, ++ start_mark: start_mark, ++ end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, ++ head: text, ++ }) ++ } ++ return true ++} +diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go +new file mode 100644 +index 000000000000..9210ece7e972 +--- /dev/null ++++ b/vendor/gopkg.in/yaml.v3/sorter.go +@@ -0,0 +1,134 @@ ++// ++// Copyright (c) 2011-2019 Canonical Ltd ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. 
++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++package yaml ++ ++import ( ++ "reflect" ++ "unicode" ++) ++ ++type keyList []reflect.Value ++ ++func (l keyList) Len() int { return len(l) } ++func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } ++func (l keyList) Less(i, j int) bool { ++ a := l[i] ++ b := l[j] ++ ak := a.Kind() ++ bk := b.Kind() ++ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { ++ a = a.Elem() ++ ak = a.Kind() ++ } ++ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { ++ b = b.Elem() ++ bk = b.Kind() ++ } ++ af, aok := keyFloat(a) ++ bf, bok := keyFloat(b) ++ if aok && bok { ++ if af != bf { ++ return af < bf ++ } ++ if ak != bk { ++ return ak < bk ++ } ++ return numLess(a, b) ++ } ++ if ak != reflect.String || bk != reflect.String { ++ return ak < bk ++ } ++ ar, br := []rune(a.String()), []rune(b.String()) ++ digits := false ++ for i := 0; i < len(ar) && i < len(br); i++ { ++ if ar[i] == br[i] { ++ digits = unicode.IsDigit(ar[i]) ++ continue ++ } ++ al := unicode.IsLetter(ar[i]) ++ bl := unicode.IsLetter(br[i]) ++ if al && bl { ++ return ar[i] < br[i] ++ } ++ if al || bl { ++ if digits { ++ return al ++ } else { ++ return bl ++ } ++ } ++ var ai, bi int ++ var an, bn int64 ++ if ar[i] == '0' || br[i] == '0' { ++ for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { ++ if ar[j] != '0' { ++ an = 1 ++ bn = 1 ++ break ++ } ++ } ++ } ++ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { ++ an = an*10 + int64(ar[ai]-'0') ++ } ++ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { ++ bn = bn*10 + int64(br[bi]-'0') ++ } ++ if an != bn { ++ return an < bn ++ } ++ if ai != bi { ++ return ai < bi ++ } ++ return ar[i] < br[i] ++ } ++ return len(ar) < len(br) ++} ++ ++// keyFloat returns a float value for v if it is a number/bool ++// and whether it is a number/bool or not. ++func keyFloat(v reflect.Value) (f float64, ok bool) { ++ switch v.Kind() { ++ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: ++ return float64(v.Int()), true ++ case reflect.Float32, reflect.Float64: ++ return v.Float(), true ++ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: ++ return float64(v.Uint()), true ++ case reflect.Bool: ++ if v.Bool() { ++ return 1, true ++ } ++ return 0, true ++ } ++ return 0, false ++} ++ ++// numLess returns whether a < b. ++// a and b must necessarily have the same kind. 
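++// Less orders values of differing kinds by kind before calling numLess,
++// so mixed-kind pairs never reach the panic below.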
++func numLess(a, b reflect.Value) bool { ++ switch a.Kind() { ++ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: ++ return a.Int() < b.Int() ++ case reflect.Float32, reflect.Float64: ++ return a.Float() < b.Float() ++ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: ++ return a.Uint() < b.Uint() ++ case reflect.Bool: ++ return !a.Bool() && b.Bool() ++ } ++ panic("not a number") ++} +diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go +new file mode 100644 +index 000000000000..b8a116bf9a22 +--- /dev/null ++++ b/vendor/gopkg.in/yaml.v3/writerc.go +@@ -0,0 +1,48 @@ ++// ++// Copyright (c) 2011-2019 Canonical Ltd ++// Copyright (c) 2006-2010 Kirill Simonov ++// ++// Permission is hereby granted, free of charge, to any person obtaining a copy of ++// this software and associated documentation files (the "Software"), to deal in ++// the Software without restriction, including without limitation the rights to ++// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies ++// of the Software, and to permit persons to whom the Software is furnished to do ++// so, subject to the following conditions: ++// ++// The above copyright notice and this permission notice shall be included in all ++// copies or substantial portions of the Software. ++// ++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++// SOFTWARE. ++ ++package yaml ++ ++// Set the writer error and return false. ++func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { ++ emitter.error = yaml_WRITER_ERROR ++ emitter.problem = problem ++ return false ++} ++ ++// Flush the output buffer. ++func yaml_emitter_flush(emitter *yaml_emitter_t) bool { ++ if emitter.write_handler == nil { ++ panic("write handler not set") ++ } ++ ++ // Check if the buffer is empty. ++ if emitter.buffer_pos == 0 { ++ return true ++ } ++ ++ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { ++ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) ++ } ++ emitter.buffer_pos = 0 ++ return true ++} +diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go +new file mode 100644 +index 000000000000..8cec6da48d3e +--- /dev/null ++++ b/vendor/gopkg.in/yaml.v3/yaml.go +@@ -0,0 +1,698 @@ ++// ++// Copyright (c) 2011-2019 Canonical Ltd ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++// Package yaml implements YAML support for the Go language. 
++//
++// Source code and other details for the project are available at GitHub:
++//
++// https://github.com/go-yaml/yaml
++//
++package yaml
++
++import (
++	"errors"
++	"fmt"
++	"io"
++	"reflect"
++	"strings"
++	"sync"
++	"unicode/utf8"
++)
++
++// The Unmarshaler interface may be implemented by types to customize their
++// behavior when being unmarshaled from a YAML document.
++type Unmarshaler interface {
++	UnmarshalYAML(value *Node) error
++}
++
++type obsoleteUnmarshaler interface {
++	UnmarshalYAML(unmarshal func(interface{}) error) error
++}
++
++// The Marshaler interface may be implemented by types to customize their
++// behavior when being marshaled into a YAML document. The returned value
++// is marshaled in place of the original value implementing Marshaler.
++//
++// If an error is returned by MarshalYAML, the marshaling procedure stops
++// and returns with the provided error.
++type Marshaler interface {
++	MarshalYAML() (interface{}, error)
++}
++
++// Unmarshal decodes the first document found within the in byte slice
++// and assigns decoded values into the out value.
++//
++// Maps and pointers (to a struct, string, int, etc) are accepted as out
++// values. If an internal pointer within a struct is not initialized,
++// the yaml package will initialize it if necessary for unmarshalling
++// the provided data. The out parameter must not be nil.
++//
++// The type of the decoded values should be compatible with the respective
++// values in out. If one or more values cannot be decoded due to type
++// mismatches, decoding continues partially until the end of the YAML
++// content, and a *yaml.TypeError is returned with details for all
++// missed values.
++//
++// Struct fields are only unmarshalled if they are exported (have an
++// upper case first letter), and are unmarshalled using the field name
++// lowercased as the default key. Custom keys may be defined via the
++// "yaml" name in the field tag: the content preceding the first comma
++// is used as the key, and the following comma-separated options are
++// used to tweak the marshalling process (see Marshal).
++// Conflicting names result in a runtime error.
++//
++// For example:
++//
++//     type T struct {
++//         F int `yaml:"a,omitempty"`
++//         B int
++//     }
++//     var t T
++//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
++//
++// See the documentation of Marshal for the format of tags and a list of
++// supported tag options.
++//
++func Unmarshal(in []byte, out interface{}) (err error) {
++	return unmarshal(in, out, false)
++}
++
++// A Decoder reads and decodes YAML values from an input stream.
++type Decoder struct {
++	parser      *parser
++	knownFields bool
++}
++
++// NewDecoder returns a new decoder that reads from r.
++//
++// The decoder introduces its own buffering and may read
++// data from r beyond the YAML values requested.
++func NewDecoder(r io.Reader) *Decoder {
++	return &Decoder{
++		parser: newParserFromReader(r),
++	}
++}
++
++// KnownFields ensures that the keys in decoded mappings exist as fields
++// in the struct being decoded into.
++func (dec *Decoder) KnownFields(enable bool) {
++	dec.knownFields = enable
++}
++
++// Decode reads the next YAML-encoded value from its input
++// and stores it in the value pointed to by v.
++//
++// See the documentation for Unmarshal for details about the
++// conversion of YAML into a Go value.
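++//
++// Decode returns io.EOF when there are no more documents in the stream.
++// For example, to drain a multi-document stream:
++//
++//     dec := yaml.NewDecoder(r)
++//     for {
++//         var doc map[string]interface{}
++//         err := dec.Decode(&doc)
++//         if err == io.EOF {
++//             break
++//         }
++//         if err != nil {
++//             return err
++//         }
++//         // use doc
++//     }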
++func (dec *Decoder) Decode(v interface{}) (err error) {
++	d := newDecoder()
++	d.knownFields = dec.knownFields
++	defer handleErr(&err)
++	node := dec.parser.parse()
++	if node == nil {
++		return io.EOF
++	}
++	out := reflect.ValueOf(v)
++	if out.Kind() == reflect.Ptr && !out.IsNil() {
++		out = out.Elem()
++	}
++	d.unmarshal(node, out)
++	if len(d.terrors) > 0 {
++		return &TypeError{d.terrors}
++	}
++	return nil
++}
++
++// Decode decodes the node and stores its data into the value pointed to by v.
++//
++// See the documentation for Unmarshal for details about the
++// conversion of YAML into a Go value.
++func (n *Node) Decode(v interface{}) (err error) {
++	d := newDecoder()
++	defer handleErr(&err)
++	out := reflect.ValueOf(v)
++	if out.Kind() == reflect.Ptr && !out.IsNil() {
++		out = out.Elem()
++	}
++	d.unmarshal(n, out)
++	if len(d.terrors) > 0 {
++		return &TypeError{d.terrors}
++	}
++	return nil
++}
++
++func unmarshal(in []byte, out interface{}, strict bool) (err error) {
++	defer handleErr(&err)
++	d := newDecoder()
++	p := newParser(in)
++	defer p.destroy()
++	node := p.parse()
++	if node != nil {
++		v := reflect.ValueOf(out)
++		if v.Kind() == reflect.Ptr && !v.IsNil() {
++			v = v.Elem()
++		}
++		d.unmarshal(node, v)
++	}
++	if len(d.terrors) > 0 {
++		return &TypeError{d.terrors}
++	}
++	return nil
++}
++
++// Marshal serializes the value provided into a YAML document. The structure
++// of the generated document will reflect the structure of the value itself.
++// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
++//
++// Struct fields are only marshalled if they are exported (have an upper case
++// first letter), and are marshalled using the field name lowercased as the
++// default key. Custom keys may be defined via the "yaml" name in the field
++// tag: the content preceding the first comma is used as the key, and the
++// following comma-separated options are used to tweak the marshalling process.
++// Conflicting names result in a runtime error.
++//
++// The field tag format accepted is:
++//
++//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
++//
++// The following flags are currently supported:
++//
++//     omitempty    Only include the field if it's not set to the zero
++//                  value for the type or to empty slices or maps.
++//                  Zero valued structs will be omitted if all their public
++//                  fields are zero, unless they implement an IsZero
++//                  method (see the IsZeroer interface type), in which
++//                  case the field will be excluded if IsZero returns true.
++//
++//     flow         Marshal using a flow style (useful for structs,
++//                  sequences and maps).
++//
++//     inline       Inline the field, which must be a struct or a map,
++//                  causing all of its fields or keys to be processed as if
++//                  they were part of the outer struct. For maps, keys must
++//                  not conflict with the yaml keys of other struct fields.
++//
++// In addition, if the key is "-", the field is ignored.
++//
++// For example:
++//
++//     type T struct {
++//         F int `yaml:"a,omitempty"`
++//         B int
++//     }
++//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
++//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
++//
++func Marshal(in interface{}) (out []byte, err error) {
++	defer handleErr(&err)
++	e := newEncoder()
++	defer e.destroy()
++	e.marshalDoc("", reflect.ValueOf(in))
++	e.finish()
++	out = e.out
++	return
++}
++
++// An Encoder writes YAML values to an output stream.
++type Encoder struct {
++	encoder *encoder
++}
++
++// NewEncoder returns a new encoder that writes to w.
++// The Encoder should be closed after use to flush all data
++// to w.
++func NewEncoder(w io.Writer) *Encoder {
++	return &Encoder{
++		encoder: newEncoderWithWriter(w),
++	}
++}
++
++// Encode writes the YAML encoding of v to the stream.
++// If multiple items are encoded to the stream, the
++// second and subsequent documents will be preceded
++// with a "---" document separator, but the first will not.
++//
++// See the documentation for Marshal for details about the conversion of Go
++// values to YAML.
++func (e *Encoder) Encode(v interface{}) (err error) {
++	defer handleErr(&err)
++	e.encoder.marshalDoc("", reflect.ValueOf(v))
++	return nil
++}
++
++// Encode encodes value v and stores its representation in n.
++//
++// See the documentation for Marshal for details about the
++// conversion of Go values into YAML.
++func (n *Node) Encode(v interface{}) (err error) {
++	defer handleErr(&err)
++	e := newEncoder()
++	defer e.destroy()
++	e.marshalDoc("", reflect.ValueOf(v))
++	e.finish()
++	p := newParser(e.out)
++	p.textless = true
++	defer p.destroy()
++	doc := p.parse()
++	*n = *doc.Content[0]
++	return nil
++}
++
++// SetIndent changes the indentation used when encoding.
++func (e *Encoder) SetIndent(spaces int) {
++	if spaces < 0 {
++		panic("yaml: cannot indent to a negative number of spaces")
++	}
++	e.encoder.indent = spaces
++}
++
++// Close closes the encoder by writing any remaining data.
++// It does not write a stream terminating string "...".
++func (e *Encoder) Close() (err error) {
++	defer handleErr(&err)
++	e.encoder.finish()
++	return nil
++}
++
++func handleErr(err *error) {
++	if v := recover(); v != nil {
++		if e, ok := v.(yamlError); ok {
++			*err = e.err
++		} else {
++			panic(v)
++		}
++	}
++}
++
++type yamlError struct {
++	err error
++}
++
++func fail(err error) {
++	panic(yamlError{err})
++}
++
++func failf(format string, args ...interface{}) {
++	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
++}
++
++// A TypeError is returned by Unmarshal when one or more fields in
++// the YAML document cannot be properly decoded into the requested
++// types. When this error is returned, the value is still
++// unmarshaled partially.
++type TypeError struct {
++	Errors []string
++}
++
++func (e *TypeError) Error() string {
++	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
++}
++
++type Kind uint32
++
++const (
++	DocumentNode Kind = 1 << iota
++	SequenceNode
++	MappingNode
++	ScalarNode
++	AliasNode
++)
++
++type Style uint32
++
++const (
++	TaggedStyle Style = 1 << iota
++	DoubleQuotedStyle
++	SingleQuotedStyle
++	LiteralStyle
++	FoldedStyle
++	FlowStyle
++)
++
++// Node represents an element in the YAML document hierarchy. While documents
++// are typically encoded and decoded into higher level types, such as structs
++// and maps, Node is an intermediate representation that allows detailed
++// control over the content being decoded or encoded.
++//
++// It's worth noting that although Node offers access into details such as
++// line numbers, columns, and comments, the content when re-encoded will not
++// have its original textual representation preserved. An effort is made to
++// render the data pleasantly, and to preserve comments near the data they
++// describe, though.
++//
++// Values that make use of the Node type interact with the yaml package in the
++// same way any other type would do, by encoding and decoding yaml data
++// directly or indirectly into them.
++//
++// For example:
++//
++//	var person struct {
++//		Name    string
++//		Address yaml.Node
++//	}
++//	err := yaml.Unmarshal(data, &person)
++//
++// Or by itself:
++//
++//	var person Node
++//	err := yaml.Unmarshal(data, &person)
++//
++type Node struct {
++	// Kind defines whether the node is a document, a mapping, a sequence,
++	// a scalar value, or an alias to another node. The specific data type of
++	// scalar nodes may be obtained via the ShortTag and LongTag methods.
++	Kind Kind
++
++	// Style allows customizing the appearance of the node in the tree.
++	Style Style
++
++	// Tag holds the YAML tag defining the data type for the value.
++	// When decoding, this field will always be set to the resolved tag,
++	// even when it wasn't explicitly provided in the YAML content.
++	// When encoding, if this field is unset the value type will be
++	// implied from the node properties, and if it is set, it will only
++	// be serialized into the representation if TaggedStyle is used or
++	// the implicit tag diverges from the provided one.
++	Tag string
++
++	// Value holds the unescaped and unquoted representation of the value.
++	Value string
++
++	// Anchor holds the anchor name for this node, which allows aliases to point to it.
++	Anchor string
++
++	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
++	Alias *Node
++
++	// Content holds contained nodes for documents, mappings, and sequences.
++	Content []*Node
++
++	// HeadComment holds any comments in the lines preceding the node and
++	// not separated by an empty line.
++	HeadComment string
++
++	// LineComment holds any comments at the end of the line where the node is.
++	LineComment string
++
++	// FootComment holds any comments following the node and before empty lines.
++	FootComment string
++
++	// Line and Column hold the node position in the decoded YAML text.
++	// These fields are not respected when encoding the node.
++	Line   int
++	Column int
++}
++
++// IsZero returns whether the node has all of its fields unset.
++func (n *Node) IsZero() bool {
++	return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
++		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
++}
++
++// LongTag returns the long form of the tag that indicates the data type for
++// the node. If the Tag field isn't explicitly defined, one will be computed
++// based on the node properties.
++func (n *Node) LongTag() string {
++	return longTag(n.ShortTag())
++}
++
++// ShortTag returns the short form of the YAML tag that indicates data type for
++// the node. If the Tag field isn't explicitly defined, one will be computed
++// based on the node properties.
++func (n *Node) ShortTag() string {
++	if n.indicatedString() {
++		return strTag
++	}
++	if n.Tag == "" || n.Tag == "!" {
++		switch n.Kind {
++		case MappingNode:
++			return mapTag
++		case SequenceNode:
++			return seqTag
++		case AliasNode:
++			if n.Alias != nil {
++				return n.Alias.ShortTag()
++			}
++		case ScalarNode:
++			tag, _ := resolve("", n.Value)
++			return tag
++		case 0:
++			// Special case to make the zero value convenient.
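++			// An all-zero Node (see IsZero) is reported as null, so a
++			// declared-but-unset Node behaves like an explicit null scalar.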
++ if n.IsZero() { ++ return nullTag ++ } ++ } ++ return "" ++ } ++ return shortTag(n.Tag) ++} ++ ++func (n *Node) indicatedString() bool { ++ return n.Kind == ScalarNode && ++ (shortTag(n.Tag) == strTag || ++ (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) ++} ++ ++// SetString is a convenience function that sets the node to a string value ++// and defines its style in a pleasant way depending on its content. ++func (n *Node) SetString(s string) { ++ n.Kind = ScalarNode ++ if utf8.ValidString(s) { ++ n.Value = s ++ n.Tag = strTag ++ } else { ++ n.Value = encodeBase64(s) ++ n.Tag = binaryTag ++ } ++ if strings.Contains(n.Value, "\n") { ++ n.Style = LiteralStyle ++ } ++} ++ ++// -------------------------------------------------------------------------- ++// Maintain a mapping of keys to structure field indexes ++ ++// The code in this section was copied from mgo/bson. ++ ++// structInfo holds details for the serialization of fields of ++// a given struct. ++type structInfo struct { ++ FieldsMap map[string]fieldInfo ++ FieldsList []fieldInfo ++ ++ // InlineMap is the number of the field in the struct that ++ // contains an ,inline map, or -1 if there's none. ++ InlineMap int ++ ++ // InlineUnmarshalers holds indexes to inlined fields that ++ // contain unmarshaler values. ++ InlineUnmarshalers [][]int ++} ++ ++type fieldInfo struct { ++ Key string ++ Num int ++ OmitEmpty bool ++ Flow bool ++ // Id holds the unique field identifier, so we can cheaply ++ // check for field duplicates without maintaining an extra map. ++ Id int ++ ++ // Inline holds the field index if the field is part of an inlined struct. ++ Inline []int ++} ++ ++var structMap = make(map[reflect.Type]*structInfo) ++var fieldMapMutex sync.RWMutex ++var unmarshalerType reflect.Type ++ ++func init() { ++ var v Unmarshaler ++ unmarshalerType = reflect.ValueOf(&v).Elem().Type() ++} ++ ++func getStructInfo(st reflect.Type) (*structInfo, error) { ++ fieldMapMutex.RLock() ++ sinfo, found := structMap[st] ++ fieldMapMutex.RUnlock() ++ if found { ++ return sinfo, nil ++ } ++ ++ n := st.NumField() ++ fieldsMap := make(map[string]fieldInfo) ++ fieldsList := make([]fieldInfo, 0, n) ++ inlineMap := -1 ++ inlineUnmarshalers := [][]int(nil) ++ for i := 0; i != n; i++ { ++ field := st.Field(i) ++ if field.PkgPath != "" && !field.Anonymous { ++ continue // Private field ++ } ++ ++ info := fieldInfo{Num: i} ++ ++ tag := field.Tag.Get("yaml") ++ if tag == "" && strings.Index(string(field.Tag), ":") < 0 { ++ tag = string(field.Tag) ++ } ++ if tag == "-" { ++ continue ++ } ++ ++ inline := false ++ fields := strings.Split(tag, ",") ++ if len(fields) > 1 { ++ for _, flag := range fields[1:] { ++ switch flag { ++ case "omitempty": ++ info.OmitEmpty = true ++ case "flow": ++ info.Flow = true ++ case "inline": ++ inline = true ++ default: ++ return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) ++ } ++ } ++ tag = fields[0] ++ } ++ ++ if inline { ++ switch field.Type.Kind() { ++ case reflect.Map: ++ if inlineMap >= 0 { ++ return nil, errors.New("multiple ,inline maps in struct " + st.String()) ++ } ++ if field.Type.Key() != reflect.TypeOf("") { ++ return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) ++ } ++ inlineMap = info.Num ++ case reflect.Struct, reflect.Ptr: ++ ftype := field.Type ++ for ftype.Kind() == reflect.Ptr { ++ ftype = ftype.Elem() ++ } ++ if ftype.Kind() != reflect.Struct { ++ return nil, 
errors.New("option ,inline may only be used on a struct or map field") ++ } ++ if reflect.PtrTo(ftype).Implements(unmarshalerType) { ++ inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) ++ } else { ++ sinfo, err := getStructInfo(ftype) ++ if err != nil { ++ return nil, err ++ } ++ for _, index := range sinfo.InlineUnmarshalers { ++ inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) ++ } ++ for _, finfo := range sinfo.FieldsList { ++ if _, found := fieldsMap[finfo.Key]; found { ++ msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() ++ return nil, errors.New(msg) ++ } ++ if finfo.Inline == nil { ++ finfo.Inline = []int{i, finfo.Num} ++ } else { ++ finfo.Inline = append([]int{i}, finfo.Inline...) ++ } ++ finfo.Id = len(fieldsList) ++ fieldsMap[finfo.Key] = finfo ++ fieldsList = append(fieldsList, finfo) ++ } ++ } ++ default: ++ return nil, errors.New("option ,inline may only be used on a struct or map field") ++ } ++ continue ++ } ++ ++ if tag != "" { ++ info.Key = tag ++ } else { ++ info.Key = strings.ToLower(field.Name) ++ } ++ ++ if _, found = fieldsMap[info.Key]; found { ++ msg := "duplicated key '" + info.Key + "' in struct " + st.String() ++ return nil, errors.New(msg) ++ } ++ ++ info.Id = len(fieldsList) ++ fieldsList = append(fieldsList, info) ++ fieldsMap[info.Key] = info ++ } ++ ++ sinfo = &structInfo{ ++ FieldsMap: fieldsMap, ++ FieldsList: fieldsList, ++ InlineMap: inlineMap, ++ InlineUnmarshalers: inlineUnmarshalers, ++ } ++ ++ fieldMapMutex.Lock() ++ structMap[st] = sinfo ++ fieldMapMutex.Unlock() ++ return sinfo, nil ++} ++ ++// IsZeroer is used to check whether an object is zero to ++// determine whether it should be omitted when marshaling ++// with the omitempty flag. One notable implementation ++// is time.Time. 
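++//
++// A hypothetical sketch of a custom zero check that omitempty would
++// consult instead of comparing fields one by one:
++//
++//	type Window struct{ Min, Max int }
++//
++//	func (w Window) IsZero() bool { return w.Min == 0 && w.Max == 0 }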
++type IsZeroer interface { ++ IsZero() bool ++} ++ ++func isZero(v reflect.Value) bool { ++ kind := v.Kind() ++ if z, ok := v.Interface().(IsZeroer); ok { ++ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { ++ return true ++ } ++ return z.IsZero() ++ } ++ switch kind { ++ case reflect.String: ++ return len(v.String()) == 0 ++ case reflect.Interface, reflect.Ptr: ++ return v.IsNil() ++ case reflect.Slice: ++ return v.Len() == 0 ++ case reflect.Map: ++ return v.Len() == 0 ++ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: ++ return v.Int() == 0 ++ case reflect.Float32, reflect.Float64: ++ return v.Float() == 0 ++ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: ++ return v.Uint() == 0 ++ case reflect.Bool: ++ return !v.Bool() ++ case reflect.Struct: ++ vt := v.Type() ++ for i := v.NumField() - 1; i >= 0; i-- { ++ if vt.Field(i).PkgPath != "" { ++ continue // Private field ++ } ++ if !isZero(v.Field(i)) { ++ return false ++ } ++ } ++ return true ++ } ++ return false ++} +diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go +new file mode 100644 +index 000000000000..7c6d00770619 +--- /dev/null ++++ b/vendor/gopkg.in/yaml.v3/yamlh.go +@@ -0,0 +1,807 @@ ++// ++// Copyright (c) 2011-2019 Canonical Ltd ++// Copyright (c) 2006-2010 Kirill Simonov ++// ++// Permission is hereby granted, free of charge, to any person obtaining a copy of ++// this software and associated documentation files (the "Software"), to deal in ++// the Software without restriction, including without limitation the rights to ++// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies ++// of the Software, and to permit persons to whom the Software is furnished to do ++// so, subject to the following conditions: ++// ++// The above copyright notice and this permission notice shall be included in all ++// copies or substantial portions of the Software. ++// ++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++// SOFTWARE. ++ ++package yaml ++ ++import ( ++ "fmt" ++ "io" ++) ++ ++// The version directive data. ++type yaml_version_directive_t struct { ++ major int8 // The major version number. ++ minor int8 // The minor version number. ++} ++ ++// The tag directive data. ++type yaml_tag_directive_t struct { ++ handle []byte // The tag handle. ++ prefix []byte // The tag prefix. ++} ++ ++type yaml_encoding_t int ++ ++// The stream encoding. ++const ( ++ // Let the parser choose the encoding. ++ yaml_ANY_ENCODING yaml_encoding_t = iota ++ ++ yaml_UTF8_ENCODING // The default UTF-8 encoding. ++ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. ++ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. ++) ++ ++type yaml_break_t int ++ ++// Line break types. ++const ( ++ // Let the parser choose the break type. ++ yaml_ANY_BREAK yaml_break_t = iota ++ ++ yaml_CR_BREAK // Use CR for line breaks (Mac style). ++ yaml_LN_BREAK // Use LN for line breaks (Unix style). ++ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). 
++)
++
++type yaml_error_type_t int
++
++// Many bad things could happen with the parser and emitter.
++const (
++	// No error is produced.
++	yaml_NO_ERROR yaml_error_type_t = iota
++
++	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
++	yaml_READER_ERROR   // Cannot read or decode the input stream.
++	yaml_SCANNER_ERROR  // Cannot scan the input stream.
++	yaml_PARSER_ERROR   // Cannot parse the input stream.
++	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
++	yaml_WRITER_ERROR   // Cannot write to the output stream.
++	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
++)
++
++// The pointer position.
++type yaml_mark_t struct {
++	index  int // The position index.
++	line   int // The position line.
++	column int // The position column.
++}
++
++// Node Styles
++
++type yaml_style_t int8
++
++type yaml_scalar_style_t yaml_style_t
++
++// Scalar styles.
++const (
++	// Let the emitter choose the style.
++	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
++
++	yaml_PLAIN_SCALAR_STYLE         yaml_scalar_style_t = 1 << iota // The plain scalar style.
++	yaml_SINGLE_QUOTED_SCALAR_STYLE                                 // The single-quoted scalar style.
++	yaml_DOUBLE_QUOTED_SCALAR_STYLE                                 // The double-quoted scalar style.
++	yaml_LITERAL_SCALAR_STYLE                                       // The literal scalar style.
++	yaml_FOLDED_SCALAR_STYLE                                        // The folded scalar style.
++)
++
++type yaml_sequence_style_t yaml_style_t
++
++// Sequence styles.
++const (
++	// Let the emitter choose the style.
++	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
++
++	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
++	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
++)
++
++type yaml_mapping_style_t yaml_style_t
++
++// Mapping styles.
++const (
++	// Let the emitter choose the style.
++	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
++
++	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
++	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
++)
++
++// Tokens
++
++type yaml_token_type_t int
++
++// Token types.
++const (
++	// An empty token.
++	yaml_NO_TOKEN yaml_token_type_t = iota
++
++	yaml_STREAM_START_TOKEN // A STREAM-START token.
++	yaml_STREAM_END_TOKEN   // A STREAM-END token.
++
++	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
++	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
++	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
++	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
++
++	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
++	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
++	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
++
++	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
++	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
++	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
++	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
++
++	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
++	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
++	yaml_KEY_TOKEN         // A KEY token.
++	yaml_VALUE_TOKEN       // A VALUE token.
++
++	yaml_ALIAS_TOKEN  // An ALIAS token.
++	yaml_ANCHOR_TOKEN // An ANCHOR token.
++	yaml_TAG_TOKEN    // A TAG token.
++	yaml_SCALAR_TOKEN // A SCALAR token.
++)
++
++func (tt yaml_token_type_t) String() string {
++	switch tt {
++	case yaml_NO_TOKEN:
++		return "yaml_NO_TOKEN"
++	case yaml_STREAM_START_TOKEN:
++		return "yaml_STREAM_START_TOKEN"
++	case yaml_STREAM_END_TOKEN:
++		return "yaml_STREAM_END_TOKEN"
++	case yaml_VERSION_DIRECTIVE_TOKEN:
++		return "yaml_VERSION_DIRECTIVE_TOKEN"
++	case yaml_TAG_DIRECTIVE_TOKEN:
++		return "yaml_TAG_DIRECTIVE_TOKEN"
++	case yaml_DOCUMENT_START_TOKEN:
++		return "yaml_DOCUMENT_START_TOKEN"
++	case yaml_DOCUMENT_END_TOKEN:
++		return "yaml_DOCUMENT_END_TOKEN"
++	case yaml_BLOCK_SEQUENCE_START_TOKEN:
++		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
++	case yaml_BLOCK_MAPPING_START_TOKEN:
++		return "yaml_BLOCK_MAPPING_START_TOKEN"
++	case yaml_BLOCK_END_TOKEN:
++		return "yaml_BLOCK_END_TOKEN"
++	case yaml_FLOW_SEQUENCE_START_TOKEN:
++		return "yaml_FLOW_SEQUENCE_START_TOKEN"
++	case yaml_FLOW_SEQUENCE_END_TOKEN:
++		return "yaml_FLOW_SEQUENCE_END_TOKEN"
++	case yaml_FLOW_MAPPING_START_TOKEN:
++		return "yaml_FLOW_MAPPING_START_TOKEN"
++	case yaml_FLOW_MAPPING_END_TOKEN:
++		return "yaml_FLOW_MAPPING_END_TOKEN"
++	case yaml_BLOCK_ENTRY_TOKEN:
++		return "yaml_BLOCK_ENTRY_TOKEN"
++	case yaml_FLOW_ENTRY_TOKEN:
++		return "yaml_FLOW_ENTRY_TOKEN"
++	case yaml_KEY_TOKEN:
++		return "yaml_KEY_TOKEN"
++	case yaml_VALUE_TOKEN:
++		return "yaml_VALUE_TOKEN"
++	case yaml_ALIAS_TOKEN:
++		return "yaml_ALIAS_TOKEN"
++	case yaml_ANCHOR_TOKEN:
++		return "yaml_ANCHOR_TOKEN"
++	case yaml_TAG_TOKEN:
++		return "yaml_TAG_TOKEN"
++	case yaml_SCALAR_TOKEN:
++		return "yaml_SCALAR_TOKEN"
++	}
++	return "<unknown token>"
++}
++
++// The token structure.
++type yaml_token_t struct {
++	// The token type.
++	typ yaml_token_type_t
++
++	// The start/end of the token.
++	start_mark, end_mark yaml_mark_t
++
++	// The stream encoding (for yaml_STREAM_START_TOKEN).
++	encoding yaml_encoding_t
++
++	// The alias/anchor/scalar value or tag/tag directive handle
++	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
++	value []byte
++
++	// The tag suffix (for yaml_TAG_TOKEN).
++	suffix []byte
++
++	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
++	prefix []byte
++
++	// The scalar style (for yaml_SCALAR_TOKEN).
++	style yaml_scalar_style_t
++
++	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
++	major, minor int8
++}
++
++// Events
++
++type yaml_event_type_t int8
++
++// Event types.
++const (
++	// An empty event.
++	yaml_NO_EVENT yaml_event_type_t = iota
++
++	yaml_STREAM_START_EVENT   // A STREAM-START event.
++	yaml_STREAM_END_EVENT     // A STREAM-END event.
++	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
++	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
++	yaml_ALIAS_EVENT          // An ALIAS event.
++	yaml_SCALAR_EVENT         // A SCALAR event.
++	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
++	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
++	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
++	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
++ yaml_TAIL_COMMENT_EVENT ++) ++ ++var eventStrings = []string{ ++ yaml_NO_EVENT: "none", ++ yaml_STREAM_START_EVENT: "stream start", ++ yaml_STREAM_END_EVENT: "stream end", ++ yaml_DOCUMENT_START_EVENT: "document start", ++ yaml_DOCUMENT_END_EVENT: "document end", ++ yaml_ALIAS_EVENT: "alias", ++ yaml_SCALAR_EVENT: "scalar", ++ yaml_SEQUENCE_START_EVENT: "sequence start", ++ yaml_SEQUENCE_END_EVENT: "sequence end", ++ yaml_MAPPING_START_EVENT: "mapping start", ++ yaml_MAPPING_END_EVENT: "mapping end", ++ yaml_TAIL_COMMENT_EVENT: "tail comment", ++} ++ ++func (e yaml_event_type_t) String() string { ++ if e < 0 || int(e) >= len(eventStrings) { ++ return fmt.Sprintf("unknown event %d", e) ++ } ++ return eventStrings[e] ++} ++ ++// The event structure. ++type yaml_event_t struct { ++ ++ // The event type. ++ typ yaml_event_type_t ++ ++ // The start and end of the event. ++ start_mark, end_mark yaml_mark_t ++ ++ // The document encoding (for yaml_STREAM_START_EVENT). ++ encoding yaml_encoding_t ++ ++ // The version directive (for yaml_DOCUMENT_START_EVENT). ++ version_directive *yaml_version_directive_t ++ ++ // The list of tag directives (for yaml_DOCUMENT_START_EVENT). ++ tag_directives []yaml_tag_directive_t ++ ++ // The comments ++ head_comment []byte ++ line_comment []byte ++ foot_comment []byte ++ tail_comment []byte ++ ++ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). ++ anchor []byte ++ ++ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). ++ tag []byte ++ ++ // The scalar value (for yaml_SCALAR_EVENT). ++ value []byte ++ ++ // Is the document start/end indicator implicit, or the tag optional? ++ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). ++ implicit bool ++ ++ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). ++ quoted_implicit bool ++ ++ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). ++ style yaml_style_t ++} ++ ++func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } ++func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } ++func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } ++ ++// Nodes ++ ++const ( ++ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. ++ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. ++ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. ++ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. ++ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. ++ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. ++ ++ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. ++ yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. ++ ++ // Not in original libyaml. ++ yaml_BINARY_TAG = "tag:yaml.org,2002:binary" ++ yaml_MERGE_TAG = "tag:yaml.org,2002:merge" ++ ++ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. ++ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. ++ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. 
++) ++ ++type yaml_node_type_t int ++ ++// Node types. ++const ( ++ // An empty node. ++ yaml_NO_NODE yaml_node_type_t = iota ++ ++ yaml_SCALAR_NODE // A scalar node. ++ yaml_SEQUENCE_NODE // A sequence node. ++ yaml_MAPPING_NODE // A mapping node. ++) ++ ++// An element of a sequence node. ++type yaml_node_item_t int ++ ++// An element of a mapping node. ++type yaml_node_pair_t struct { ++ key int // The key of the element. ++ value int // The value of the element. ++} ++ ++// The node structure. ++type yaml_node_t struct { ++ typ yaml_node_type_t // The node type. ++ tag []byte // The node tag. ++ ++ // The node data. ++ ++ // The scalar parameters (for yaml_SCALAR_NODE). ++ scalar struct { ++ value []byte // The scalar value. ++ length int // The length of the scalar value. ++ style yaml_scalar_style_t // The scalar style. ++ } ++ ++ // The sequence parameters (for YAML_SEQUENCE_NODE). ++ sequence struct { ++ items_data []yaml_node_item_t // The stack of sequence items. ++ style yaml_sequence_style_t // The sequence style. ++ } ++ ++ // The mapping parameters (for yaml_MAPPING_NODE). ++ mapping struct { ++ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). ++ pairs_start *yaml_node_pair_t // The beginning of the stack. ++ pairs_end *yaml_node_pair_t // The end of the stack. ++ pairs_top *yaml_node_pair_t // The top of the stack. ++ style yaml_mapping_style_t // The mapping style. ++ } ++ ++ start_mark yaml_mark_t // The beginning of the node. ++ end_mark yaml_mark_t // The end of the node. ++ ++} ++ ++// The document structure. ++type yaml_document_t struct { ++ ++ // The document nodes. ++ nodes []yaml_node_t ++ ++ // The version directive. ++ version_directive *yaml_version_directive_t ++ ++ // The list of tag directives. ++ tag_directives_data []yaml_tag_directive_t ++ tag_directives_start int // The beginning of the tag directives list. ++ tag_directives_end int // The end of the tag directives list. ++ ++ start_implicit int // Is the document start indicator implicit? ++ end_implicit int // Is the document end indicator implicit? ++ ++ // The start/end of the document. ++ start_mark, end_mark yaml_mark_t ++} ++ ++// The prototype of a read handler. ++// ++// The read handler is called when the parser needs to read more bytes from the ++// source. The handler should write not more than size bytes to the buffer. ++// The number of written bytes should be set to the size_read variable. ++// ++// [in,out] data A pointer to an application data specified by ++// yaml_parser_set_input(). ++// [out] buffer The buffer to write the data from the source. ++// [in] size The size of the buffer. ++// [out] size_read The actual number of bytes read from the source. ++// ++// On success, the handler should return 1. If the handler failed, ++// the returned value should be 0. On EOF, the handler should set the ++// size_read to 0 and return 1. ++type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) ++ ++// This structure holds information about a potential simple key. ++type yaml_simple_key_t struct { ++ possible bool // Is a simple key possible? ++ required bool // Is a simple key required? ++ token_number int // The number of the token. ++ mark yaml_mark_t // The position mark. ++} ++ ++// The states of the parser. ++type yaml_parser_state_t int ++ ++const ( ++ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota ++ ++ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. 
++	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
++	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
++	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
++	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
++	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
++	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
++	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
++	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
++	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
++	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
++	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
++	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
++	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
++	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
++	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
++	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
++	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
++	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
++	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
++	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
++	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
++	yaml_PARSE_END_STATE                               // Expect nothing.
++)
++
++func (ps yaml_parser_state_t) String() string {
++	switch ps {
++	case yaml_PARSE_STREAM_START_STATE:
++		return "yaml_PARSE_STREAM_START_STATE"
++	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
++		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
++	case yaml_PARSE_DOCUMENT_START_STATE:
++		return "yaml_PARSE_DOCUMENT_START_STATE"
++	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
++		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
++	case yaml_PARSE_DOCUMENT_END_STATE:
++		return "yaml_PARSE_DOCUMENT_END_STATE"
++	case yaml_PARSE_BLOCK_NODE_STATE:
++		return "yaml_PARSE_BLOCK_NODE_STATE"
++	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
++		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
++	case yaml_PARSE_FLOW_NODE_STATE:
++		return "yaml_PARSE_FLOW_NODE_STATE"
++	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
++		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
++	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
++		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
++	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
++		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
++	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
++		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
++	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
++		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
++	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
++		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
++	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
++		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
++	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
++		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
++	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
++		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
++	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
++		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
++	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
++		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
++	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
++		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
++	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
++		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
++	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
++		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
++	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
++		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
++	case yaml_PARSE_END_STATE:
++		return "yaml_PARSE_END_STATE"
++	}
++	return "<unknown parser state>"
++}
++
++// This structure holds aliases data.
++type yaml_alias_data_t struct {
++	anchor []byte      // The anchor.
++	index  int         // The node id.
++	mark   yaml_mark_t // The anchor mark.
++}
++
++// The parser structure.
++//
++// All members are internal. Manage the structure using the
++// yaml_parser_ family of functions.
++type yaml_parser_t struct {
++
++	// Error handling
++
++	error yaml_error_type_t // Error type.
++
++	problem string // Error description.
++
++	// The byte about which the problem occurred.
++	problem_offset int
++	problem_value  int
++	problem_mark   yaml_mark_t
++
++	// The error context.
++	context      string
++	context_mark yaml_mark_t
++
++	// Reader stuff
++
++	read_handler yaml_read_handler_t // Read handler.
++
++	input_reader io.Reader // File input data.
++	input        []byte    // String input data.
++	input_pos    int
++
++	eof bool // EOF flag
++
++	buffer     []byte // The working buffer.
++	buffer_pos int    // The current position of the buffer.
++
++	unread int // The number of unread characters in the buffer.
++
++	newlines int // The number of line breaks since last non-break/non-blank character
++
++	raw_buffer     []byte // The raw buffer.
++	raw_buffer_pos int    // The current position of the buffer.
++
++	encoding yaml_encoding_t // The input encoding.
++
++	offset int         // The offset of the current position (in bytes).
++	mark   yaml_mark_t // The mark of the current position.
++
++	// Comments
++
++	head_comment []byte // The current head comments
++	line_comment []byte // The current line comments
++	foot_comment []byte // The current foot comments
++	tail_comment []byte // Foot comment that happens at the end of a block.
++	stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
++
++	comments      []yaml_comment_t // The folded comments for all parsed tokens
++	comments_head int
++
++	// Scanner stuff
++
++	stream_start_produced bool // Have we started to scan the input stream?
++	stream_end_produced   bool // Have we reached the end of the input stream?
++
++	flow_level int // The number of unclosed '[' and '{' indicators.
++
++	tokens          []yaml_token_t // The tokens queue.
++	tokens_head     int            // The head of the tokens queue.
++	tokens_parsed   int            // The number of tokens fetched from the queue.
++	token_available bool           // Does the tokens queue contain a token ready for dequeueing.
++
++	indent  int   // The current indentation level.
++	indents []int // The indentation levels stack.
++
++	simple_key_allowed bool                // May a simple key occur at the current position?
++	simple_keys        []yaml_simple_key_t // The stack of simple keys.
++	simple_keys_by_tok map[int]int         // possible simple_key indexes indexed by token_number
++
++	// Parser stuff
++
++	state          yaml_parser_state_t    // The current parser state.
++	states         []yaml_parser_state_t  // The parser states stack.
++	marks          []yaml_mark_t          // The stack of marks.
++	tag_directives []yaml_tag_directive_t // The list of TAG directives.
++ ++ // Dumper stuff ++ ++ aliases []yaml_alias_data_t // The alias data. ++ ++ document *yaml_document_t // The currently parsed document. ++} ++ ++type yaml_comment_t struct { ++ ++ scan_mark yaml_mark_t // Position where scanning for comments started ++ token_mark yaml_mark_t // Position after which tokens will be associated with this comment ++ start_mark yaml_mark_t // Position of '#' comment mark ++ end_mark yaml_mark_t // Position where comment terminated ++ ++ head []byte ++ line []byte ++ foot []byte ++} ++ ++// Emitter Definitions ++ ++// The prototype of a write handler. ++// ++// The write handler is called when the emitter needs to flush the accumulated ++// characters to the output. The handler should write @a size bytes of the ++// @a buffer to the output. ++// ++// @param[in,out] data A pointer to an application data specified by ++// yaml_emitter_set_output(). ++// @param[in] buffer The buffer with bytes to be written. ++// @param[in] size The size of the buffer. ++// ++// @returns On success, the handler should return @c 1. If the handler failed, ++// the returned value should be @c 0. ++// ++type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error ++ ++type yaml_emitter_state_t int ++ ++// The emitter states. ++const ( ++ // Expect STREAM-START. ++ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota ++ ++ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. ++ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. ++ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. ++ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. ++ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. ++ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out ++ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. ++ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. ++ yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out ++ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. ++ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. ++ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. ++ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. ++ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. ++ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. ++ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. ++ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. ++ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. ++ yaml_EMIT_END_STATE // Expect nothing. ++) ++ ++// The emitter structure. ++// ++// All members are internal. Manage the structure using the @c yaml_emitter_ ++// family of functions. ++type yaml_emitter_t struct { ++ ++ // Error handling ++ ++ error yaml_error_type_t // Error type. ++ problem string // Error description. ++ ++ // Writer stuff ++ ++ write_handler yaml_write_handler_t // Write handler. ++ ++ output_buffer *[]byte // String output data. ++ output_writer io.Writer // File output data. ++ ++ buffer []byte // The working buffer. ++ buffer_pos int // The current position of the buffer. 
++
++	raw_buffer     []byte // The raw buffer.
++	raw_buffer_pos int    // The current position of the buffer.
++
++	encoding yaml_encoding_t // The stream encoding.
++
++	// Emitter stuff
++
++	canonical   bool         // If the output is in the canonical style?
++	best_indent int          // The number of indentation spaces.
++	best_width  int          // The preferred width of the output lines.
++	unicode     bool         // Allow unescaped non-ASCII characters?
++	line_break  yaml_break_t // The preferred line break.
++
++	state  yaml_emitter_state_t   // The current emitter state.
++	states []yaml_emitter_state_t // The stack of states.
++
++	events      []yaml_event_t // The event queue.
++	events_head int            // The head of the event queue.
++
++	indents []int // The stack of indentation levels.
++
++	tag_directives []yaml_tag_directive_t // The list of tag directives.
++
++	indent int // The current indentation level.
++
++	flow_level int // The current flow level.
++
++	root_context       bool // Is it the document root context?
++	sequence_context   bool // Is it a sequence context?
++	mapping_context    bool // Is it a mapping context?
++	simple_key_context bool // Is it a simple mapping key context?
++
++	line       int  // The current line.
++	column     int  // The current column.
++	whitespace bool // If the last character was a whitespace?
++	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
++	open_ended bool // If an explicit document end is required?
++
++	space_above bool // Is there an empty line above?
++	foot_indent int  // The indent used to write the foot comment above, or -1 if none.
++
++	// Anchor analysis.
++	anchor_data struct {
++		anchor []byte // The anchor value.
++		alias  bool   // Is it an alias?
++	}
++
++	// Tag analysis.
++	tag_data struct {
++		handle []byte // The tag handle.
++		suffix []byte // The tag suffix.
++	}
++
++	// Scalar analysis.
++	scalar_data struct {
++		value                 []byte              // The scalar value.
++		multiline             bool                // Does the scalar contain line breaks?
++		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
++		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
++		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
++		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
++		style                 yaml_scalar_style_t // The output style.
++	}
++
++	// Comments
++	head_comment []byte
++	line_comment []byte
++	foot_comment []byte
++	tail_comment []byte
++
++	key_line_comment []byte
++
++	// Dumper stuff
++
++	opened bool // If the stream was already opened?
++	closed bool // If the stream was already closed?
++
++	// The information associated with the document nodes.
++	anchors *struct {
++		references int  // The number of references.
++		anchor     int  // The anchor id.
++		serialized bool // If the node has been emitted?
++	}
++
++	last_anchor_id int // The last assigned anchor id.
++
++	document *yaml_document_t // The currently emitted document.
++} +diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go +new file mode 100644 +index 000000000000..e88f9c54aecb +--- /dev/null ++++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go +@@ -0,0 +1,198 @@ ++// ++// Copyright (c) 2011-2019 Canonical Ltd ++// Copyright (c) 2006-2010 Kirill Simonov ++// ++// Permission is hereby granted, free of charge, to any person obtaining a copy of ++// this software and associated documentation files (the "Software"), to deal in ++// the Software without restriction, including without limitation the rights to ++// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies ++// of the Software, and to permit persons to whom the Software is furnished to do ++// so, subject to the following conditions: ++// ++// The above copyright notice and this permission notice shall be included in all ++// copies or substantial portions of the Software. ++// ++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE ++// SOFTWARE. ++ ++package yaml ++ ++const ( ++ // The size of the input raw buffer. ++ input_raw_buffer_size = 512 ++ ++ // The size of the input buffer. ++ // It should be possible to decode the whole raw buffer. ++ input_buffer_size = input_raw_buffer_size * 3 ++ ++ // The size of the output buffer. ++ output_buffer_size = 128 ++ ++ // The size of the output raw buffer. ++ // It should be possible to encode the whole output buffer. ++ output_raw_buffer_size = (output_buffer_size*2 + 2) ++ ++ // The size of other stacks and queues. ++ initial_stack_size = 16 ++ initial_queue_size = 16 ++ initial_string_size = 16 ++) ++ ++// Check if the character at the specified position is an alphabetical ++// character, a digit, '_', or '-'. ++func is_alpha(b []byte, i int) bool { ++ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' ++} ++ ++// Check if the character at the specified position is a digit. ++func is_digit(b []byte, i int) bool { ++ return b[i] >= '0' && b[i] <= '9' ++} ++ ++// Get the value of a digit. ++func as_digit(b []byte, i int) int { ++ return int(b[i]) - '0' ++} ++ ++// Check if the character at the specified position is a hex-digit. ++func is_hex(b []byte, i int) bool { ++ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' ++} ++ ++// Get the value of a hex-digit. ++func as_hex(b []byte, i int) int { ++ bi := b[i] ++ if bi >= 'A' && bi <= 'F' { ++ return int(bi) - 'A' + 10 ++ } ++ if bi >= 'a' && bi <= 'f' { ++ return int(bi) - 'a' + 10 ++ } ++ return int(bi) - '0' ++} ++ ++// Check if the character is ASCII. ++func is_ascii(b []byte, i int) bool { ++ return b[i] <= 0x7F ++} ++ ++// Check if the character at the start of the buffer can be printed unescaped. ++func is_printable(b []byte, i int) bool { ++ return ((b[i] == 0x0A) || // . == #x0A ++ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E ++ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . 
<= #xD7FF ++ (b[i] > 0xC2 && b[i] < 0xED) || ++ (b[i] == 0xED && b[i+1] < 0xA0) || ++ (b[i] == 0xEE) || ++ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD ++ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF ++ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) ++} ++ ++// Check if the character at the specified position is NUL. ++func is_z(b []byte, i int) bool { ++ return b[i] == 0x00 ++} ++ ++// Check if the beginning of the buffer is a BOM. ++func is_bom(b []byte, i int) bool { ++ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF ++} ++ ++// Check if the character at the specified position is space. ++func is_space(b []byte, i int) bool { ++ return b[i] == ' ' ++} ++ ++// Check if the character at the specified position is tab. ++func is_tab(b []byte, i int) bool { ++ return b[i] == '\t' ++} ++ ++// Check if the character at the specified position is blank (space or tab). ++func is_blank(b []byte, i int) bool { ++ //return is_space(b, i) || is_tab(b, i) ++ return b[i] == ' ' || b[i] == '\t' ++} ++ ++// Check if the character at the specified position is a line break. ++func is_break(b []byte, i int) bool { ++ return (b[i] == '\r' || // CR (#xD) ++ b[i] == '\n' || // LF (#xA) ++ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) ++ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) ++ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) ++} ++ ++func is_crlf(b []byte, i int) bool { ++ return b[i] == '\r' && b[i+1] == '\n' ++} ++ ++// Check if the character is a line break or NUL. ++func is_breakz(b []byte, i int) bool { ++ //return is_break(b, i) || is_z(b, i) ++ return ( ++ // is_break: ++ b[i] == '\r' || // CR (#xD) ++ b[i] == '\n' || // LF (#xA) ++ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) ++ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) ++ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) ++ // is_z: ++ b[i] == 0) ++} ++ ++// Check if the character is a line break, space, or NUL. ++func is_spacez(b []byte, i int) bool { ++ //return is_space(b, i) || is_breakz(b, i) ++ return ( ++ // is_space: ++ b[i] == ' ' || ++ // is_breakz: ++ b[i] == '\r' || // CR (#xD) ++ b[i] == '\n' || // LF (#xA) ++ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) ++ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) ++ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) ++ b[i] == 0) ++} ++ ++// Check if the character is a line break, space, tab, or NUL. ++func is_blankz(b []byte, i int) bool { ++ //return is_blank(b, i) || is_breakz(b, i) ++ return ( ++ // is_blank: ++ b[i] == ' ' || b[i] == '\t' || ++ // is_breakz: ++ b[i] == '\r' || // CR (#xD) ++ b[i] == '\n' || // LF (#xA) ++ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) ++ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) ++ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) ++ b[i] == 0) ++} ++ ++// Determine the width of the character. ++func width(b byte) int { ++ // Don't replace these by a switch without first ++ // confirming that it is being inlined. 
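++	// UTF-8 leading-byte patterns: 0xxxxxxx is a 1-byte rune, 110xxxxx
++	// starts a 2-byte rune, 1110xxxx a 3-byte rune, and 11110xxx a
++	// 4-byte rune; any other byte is a continuation or invalid byte
++	// and yields a width of 0.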
++ if b&0x80 == 0x00 { ++ return 1 ++ } ++ if b&0xE0 == 0xC0 { ++ return 2 ++ } ++ if b&0xF0 == 0xE0 { ++ return 3 ++ } ++ if b&0xF8 == 0xF0 { ++ return 4 ++ } ++ return 0 ++ ++} +diff --git a/vendor/modules.txt b/vendor/modules.txt +index 4e0448570ce9..577e9de880c6 100644 +--- a/vendor/modules.txt ++++ b/vendor/modules.txt +@@ -33,12 +33,20 @@ github.com/containerd/log + # github.com/containerd/platforms v0.2.0 + ## explicit; go 1.20 + github.com/containerd/platforms ++# github.com/cpuguy83/go-md2man/v2 v2.0.3 ++## explicit; go 1.11 ++github.com/cpuguy83/go-md2man/v2 ++github.com/cpuguy83/go-md2man/v2/md2man + # github.com/creack/pty v1.1.21 + ## explicit; go 1.13 + github.com/creack/pty + # github.com/distribution/reference v0.5.0 + ## explicit; go 1.20 + github.com/distribution/reference ++# github.com/docker/cli-docs-tool v0.6.0 ++## explicit; go 1.18 ++github.com/docker/cli-docs-tool ++github.com/docker/cli-docs-tool/annotation + # github.com/docker/distribution v2.8.3+incompatible + ## explicit + github.com/docker/distribution +@@ -252,12 +260,16 @@ github.com/prometheus/procfs/internal/util + # github.com/rivo/uniseg v0.2.0 + ## explicit; go 1.12 + github.com/rivo/uniseg ++# github.com/russross/blackfriday/v2 v2.1.0 ++## explicit ++github.com/russross/blackfriday/v2 + # github.com/sirupsen/logrus v1.9.3 + ## explicit; go 1.13 + github.com/sirupsen/logrus + # github.com/spf13/cobra v1.8.0 + ## explicit; go 1.15 + github.com/spf13/cobra ++github.com/spf13/cobra/doc + # github.com/spf13/pflag v1.0.5 + ## explicit; go 1.12 + github.com/spf13/pflag +@@ -498,6 +510,9 @@ google.golang.org/protobuf/types/known/wrapperspb + # gopkg.in/yaml.v2 v2.4.0 + ## explicit; go 1.15 + gopkg.in/yaml.v2 ++# gopkg.in/yaml.v3 v3.0.1 ++## explicit ++gopkg.in/yaml.v3 + # gotest.tools/v3 v3.5.1 + ## explicit; go 1.17 + gotest.tools/v3/assert +-- +2.45.2 + diff --git a/docker-26.1.4_ce_de5c9cf0b96e.tar.xz b/docker-26.1.4_ce_de5c9cf0b96e.tar.xz new file mode 100644 index 0000000..847de52 --- /dev/null +++ b/docker-26.1.4_ce_de5c9cf0b96e.tar.xz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a866c020abe705657cb373e692db7f1ad4ad547b9e25c7a557a06f4549a63c9 +size 9909596 diff --git a/docker-26.1.5_ce_411e817ddf71.tar.xz b/docker-26.1.5_ce_411e817ddf71.tar.xz new file mode 100644 index 0000000..c522cf5 --- /dev/null +++ b/docker-26.1.5_ce_411e817ddf71.tar.xz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2d3862e95b45b04830bbce47827e07eceaa41761fa5182bd9b492621cbe8469 +size 9910176 diff --git a/docker-audit.rules b/docker-audit.rules new file mode 100644 index 0000000..363341e --- /dev/null +++ b/docker-audit.rules @@ -0,0 +1,27 @@ +## +# Audit rules based on CIS Docker 1.6 Benchmark v1.0.0 +# https://benchmarks.cisecurity.org/tools2/docker/CIS_Docker_1.6_Benchmark_v1.0.0.pdf +# Not all of these apply to SUSE. 
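+#
+# Usage hint (not part of the upstream benchmark): once this file is
+# installed where auditd picks up rules (for example under
+# /etc/audit/rules.d/), the rules can be loaded with `augenrules --load`,
+# or ad hoc with `auditctl -R <path-to-this-file>`.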
+# 1.8 Audit docker daemon +-w /usr/bin/docker -k docker +# 1.9 Audit Docker files and directories +-w /var/lib/docker -k docker +# 1.10 Audit /etc/docker +-w /etc/docker -k docker +# 1.11 Audit Docker files and directories - docker-registry.service +-w /usr/lib/systemd/system/docker-registry.service -k docker +# 1.12 Audit Docker files and directories - docker.service +-w /usr/lib/systemd/system/docker.service -k docker +# 1.13 Audit Docker files and directories - /var/run/docker.sock +-w /var/run/docker.sock -k docker +# 1.14 Audit Docker files and directories - /etc/sysconfig/docker +-w /etc/sysconfig/docker -k docker +# 1.15 Audit Docker files and directories - /etc/sysconfig/docker-network +-w /etc/sysconfig/docker-network -k docker +# 1.16 Audit Docker files and directories - /etc/sysconfig/docker-registry +-w /etc/sysconfig/docker-registry -k docker +# 1.17 Audit Docker files and directories - /etc/sysconfig/docker-storage +-w /etc/sysconfig/docker-storage -k docker +# 1.18 Audit Docker files and directories - /etc/default/docker +-w /etc/default/docker -k docker +## end docker audit rules diff --git a/docker-cli-26.1.4_ce.tar.xz b/docker-cli-26.1.4_ce.tar.xz new file mode 100644 index 0000000..a389b3f --- /dev/null +++ b/docker-cli-26.1.4_ce.tar.xz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a2b7ab7e665e9469fdd71bca1dd28ead5dc58dc9886f285f1fa75978ef5c078 +size 3971272 diff --git a/docker-cli-26.1.5_ce.tar.xz b/docker-cli-26.1.5_ce.tar.xz new file mode 100644 index 0000000..5fc0e11 --- /dev/null +++ b/docker-cli-26.1.5_ce.tar.xz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3723a617ed00e85117c3de2691e1aec6267020ee49fdb0db72c12981d0d26f9e +size 3972080 diff --git a/docker-daemon.json b/docker-daemon.json new file mode 100644 index 0000000..3661a83 --- /dev/null +++ b/docker-daemon.json @@ -0,0 +1,8 @@ +{ + "log-level": "warn", + "log-driver": "json-file", + "log-opts": { + "max-size": "10m", + "max-file": "5" + } +} diff --git a/docker-rpmlintrc b/docker-rpmlintrc new file mode 100644 index 0000000..6a3ca96 --- /dev/null +++ b/docker-rpmlintrc @@ -0,0 +1,2 @@ +addFilter("^docker-bash-completion.noarch: (E|W): non-executable-script /usr/share/bash-completion/completions/docker") +addFilter("^docker-zsh-completion.noarch: W: non-conffile-in-etc /etc/zsh_completion.d/_docker") diff --git a/docker.changes b/docker.changes new file mode 100644 index 0000000..4f91614 --- /dev/null +++ b/docker.changes @@ -0,0 +1,4132 @@ +------------------------------------------------------------------- +Wed Jul 31 05:28:09 UTC 2024 - Aleksa Sarai + +- Update to Docker 26.1.5-ce. See upstream changelog online at + +- This update includes a fix for CVE-2024-41110. bsc#1228324 +- Rebase patches: + * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch + * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch + * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch + * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch + * 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch + * 0006-bsc1221916-update-to-patched-buildkit-version-to-fix.patch + * 0007-bsc1214855-volume-use-AtomicWriteFile-to-save-volume.patch + * cli-0001-docs-include-required-tools-in-source-tree.patch + +------------------------------------------------------------------- +Wed Jul 31 04:58:15 UTC 2024 - Aleksa Sarai + +[NOTE: This update was only ever released in SLES and Leap.] + +- Update to Docker 25.0.6-ce. 
See upstream changelog online at + +- This update includes a fix for CVE-2024-41110. bsc#1228324 +- Rebase patches: + * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch + * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch + * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch + * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch + * 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch + * 0006-bsc1221916-update-to-patched-buildkit-version-to-fix.patch + * 0007-bsc1214855-volume-use-AtomicWriteFile-to-save-volume.patch + +------------------------------------------------------------------- +Mon Jun 24 08:15:24 UTC 2024 - Aleksa Sarai + +- Rebase patches: + * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch + * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch + * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch + * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch + * 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch +- Fix BuildKit's symlink resolution logic to correctly handle non-lexical + symlinks. Backport of and + . bsc#1221916 + + 0006-bsc1221916-update-to-patched-buildkit-version-to-fix.patch +- Write volume options atomically so sudden system crashes won't result in + future Docker starts failing due to empty files. Backport of + . bsc#1214855 + + 0007-bsc1214855-volume-use-AtomicWriteFile-to-save-volume.patch + +------------------------------------------------------------------- +Thu Jun 6 04:17:23 UTC 2024 - Aleksa Sarai + +- Update to Docker 26.1.4-ce. See upstream changelog online at + +- Rebase patches: + * cli-0001-docs-include-required-tools-in-source-tree.patch + +------------------------------------------------------------------- +Wed Apr 24 13:43:30 UTC 2024 - Aleksa Sarai + +- Update to Docker 26.1.0-ce. See upstream changelog online at + +- Rebase patches: + * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch + * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch + * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch + * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch + * 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch + * cli-0001-docs-include-required-tools-in-source-tree.patch + +------------------------------------------------------------------- +Thu Apr 18 07:46:18 UTC 2024 - Aleksa Sarai + +- Update to Docker 26.0.1-ce. See upstream changelog online at + +- Rebase patches: + * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch + * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch + * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch + * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch + * 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch + * cli-0001-docs-include-required-tools-in-source-tree.patch +- Update --add-runtime to point to correct binary path. + +------------------------------------------------------------------- +Mon Mar 25 12:34:56 UTC 2024 - Aleksa Sarai + +[NOTE: This update was only ever released in SLES and Leap.] + +- Update to Docker 25.0.5-ce. 
See upstream changelog online at
+  bsc#1223409
+- Rebase patches:
+  * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
+  * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
+  * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
+  * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
+  * 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch
+  * cli-0001-docs-include-required-tools-in-source-tree.patch
+- Remove upstreamed patches:
+  - 0007-daemon-overlay2-remove-world-writable-permission-fro.patch
+- Update --add-runtime to point to correct binary path.
+
+-------------------------------------------------------------------
+Fri Mar 8 07:46:11 UTC 2024 - Dan Čermák
+
+[NOTE: This update was only ever released in SLES and Leap.]
+
+- Add patch to fix bsc#1220339
+  * 0007-daemon-overlay2-remove-world-writable-permission-fro.patch
+
+- rebase patches:
+  * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
+  * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
+  * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
+  * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
+  * 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch
+  * 0006-Vendor-in-latest-buildkit-v0.11-branch-including-CVE.patch
+
+-------------------------------------------------------------------
+Thu Feb 22 14:13:42 UTC 2024 - Thorsten Kukuk
+
+- Allow disabling apparmor support (ALP supports only SELinux)
+
+-------------------------------------------------------------------
+Wed Feb 17 12:56:22 UTC 2024 - Danish Prakash
+
+- Update to Docker 25.0.3-ce. See upstream changelog online at
+
+- Fixes:
+  * bsc#1219267 - CVE-2024-23651
+  * bsc#1219268 - CVE-2024-23652
+  * bsc#1219438 - CVE-2024-23653
+- Rebase patches:
+  * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
+  * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
+  * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
+  * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
+  * 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch
+  * cli-0001-docs-include-required-tools-in-source-tree.patch
+- Remove upstreamed patches:
+  - 0006-Vendor-in-latest-buildkit-v0.11-branch-including-CVE.patch
+
+-------------------------------------------------------------------
+Wed Feb 14 08:40:36 UTC 2024 - Dan Čermák
+
+- Vendor latest buildkit v0.11:
+  Add patch 0006-Vendor-in-latest-buildkit-v0.11-branch-including-CVE.patch that
+  vendors in the latest v0.11 buildkit branch including bugfixes for the following:
+  * bsc#1219438: CVE-2024-23653
+  * bsc#1219268: CVE-2024-23652
+  * bsc#1219267: CVE-2024-23651
+
+- rebase patches:
+  * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
+  * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
+  * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
+  * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
+  * 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch
+
+- switch from %patchN to %patch -PN syntax
+- remove unused rpmlint filters and add filters to silence pointless bash & zsh
+  completion warnings
+
+-------------------------------------------------------------------
+Fri Oct 27 21:14:37 UTC 2023 - Aleksa Sarai
+
+- Update to Docker 24.0.7-ce. See upstream changelog online at
+  . bsc#1217513
+  * Deny containers access to /sys/devices/virtual/powercap by default.
+ - CVE-2020-8694 bsc#1170415 + - CVE-2020-8695 bsc#1170446 + - CVE-2020-12912 bsc#1178760 +- Rebase patches: + * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch + * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch + * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch + * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch + * 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch + * cli-0001-docs-include-required-tools-in-source-tree.patch + +------------------------------------------------------------------- +Wed Oct 11 10:32:43 UTC 2023 - Aleksa Sarai + +- Add a patch to fix apparmor on SLE-12, reverting the upstream removal of + version-specific templating for the default apparmor profile. bsc#1213500 + + 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch +- Rebase patches: + * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch + * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch + * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch + * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch + +------------------------------------------------------------------- +Thu Sep 14 01:46:30 UTC 2023 - Aleksa Sarai + +- Update to Docker 24.0.6-ce. See upstream changelog online at + . bsc#1215323 +- Rebase patches: + * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch + * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch + * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch + * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch + * cli-0001-docs-include-required-tools-in-source-tree.patch +- Switch from disabledrun to manualrun in _service. +- Add a docker.socket unit file, but with socket activation effectively + disabled to ensure that Docker will always run even if you start the socket + individually. Users should probably just ignore this unit file. bsc#1210141 + +------------------------------------------------------------------- +Tue Jul 25 19:40:25 UTC 2023 - Dirk Müller + +- Update to Docker 24.0.5-ce. See upstream changelog online at + . bsc#1213229 + +------------------------------------------------------------------- +Fri Jul 7 21:29:05 UTC 2023 - Aleksa Sarai + +- Update to Docker 24.0.4-ce. See upstream changelog online at + . bsc#1213500 + +------------------------------------------------------------------- +Fri Jul 7 02:35:02 UTC 2023 - Aleksa Sarai + +- Update to Docker 24.0.3-ce. See upstream changelog online at + . bsc#1213120 +- Rebase patches: + * cli-0001-docs-include-required-tools-in-source-tree.patch + +------------------------------------------------------------------- +Thu Jun 29 10:07:13 UTC 2023 - Danish Prakash + +- Recommend docker-rootless-extras instead of Require(ing) it, given + it's an additional functionality and not inherently required for + docker to function. + +------------------------------------------------------------------- +Tue Jun 20 15:28:13 UTC 2023 - Danish Prakash + +- Add docker-rootless-extras subpackage + (https://docs.docker.com/engine/security/rootless) + +------------------------------------------------------------------- +Wed Jun 14 13:02:01 UTC 2023 - Aleksa Sarai + +- Update to Docker 24.0.2-ce. See upstream changelog online at + . bsc#1212368 + * Includes the upstreamed fix for the mount table pollution issue. + bsc#1210797 +- Add Recommends for docker-buildx, and add /usr/lib/docker/cli-plugins as + being provided by this package. 
+- Rebase patches:
+  * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
+  * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
+  * 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
+  * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
+  * cli-0001-docs-include-required-tools-in-source-tree.patch
+
+-------------------------------------------------------------------
+Sun May 21 02:31:35 UTC 2023 - Aleksa Sarai
+
+- Update to Docker 23.0.6-ce. See upstream changelog online at
+  . bsc#1211578
+- Rebase patches:
+  * cli-0001-docs-include-required-tools-in-source-tree.patch
+- Re-unify packaging for SLE-12 and SLE-15.
+- Add patch to fix build on SLE-12 by switching back to libbtrfs-devel headers
+  (the uapi headers in SLE-12 are too old).
+  + 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch
+- Re-numbered patches:
+  - 0003-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
+  + 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
+
+-------------------------------------------------------------------
+Thu Apr 27 14:09:05 UTC 2023 - Aleksa Sarai
+
+- Update to Docker 23.0.5-ce. See upstream changelog online at
+  .
+- Rebase patches:
+  * cli-0001-docs-include-required-tools-in-source-tree.patch
+
+-------------------------------------------------------------------
+Wed Apr 26 00:31:54 UTC 2023 - Aleksa Sarai
+
+- Update to Docker 23.0.4-ce. See upstream changelog online at
+  . bsc#1208074
+- Fixes:
+  * bsc#1214107 - CVE-2023-28840
+  * bsc#1214108 - CVE-2023-28841
+  * bsc#1214109 - CVE-2023-28842
+- Rebase patches:
+  * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
+  * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
+  * 0003-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
+- Renumbered patches:
+  - 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
+- Remove upstreamed patches:
+  - 0005-bsc1183855-btrfs-Do-not-disable-quota-on-cleanup.patch
+  - 0006-bsc1193930-vendor-update-golang.org-x-crypto.patch
+  - 0007-bsc1200022-fifo.Close-prevent-possible-panic-if-fifo.patch
+- Backport to allow man pages to be
+  built without internet access in OBS.
+  + cli-0001-docs-include-required-tools-in-source-tree.patch
+
+-------------------------------------------------------------------
+Wed Feb 1 14:33:19 UTC 2023 - Dirk Müller
+
+- update to 20.10.23-ce.
+  * see upstream changelog at https://docs.docker.com/engine/release-notes/#201023
+
+- drop kubic flavor as kubic is EOL. this removes:
+  kubelet.env docker-kubic-service.conf 0003-PRIVATE-REGISTRY-add-private-registry-mirror-support.patch
+
+-------------------------------------------------------------------
+Tue Dec 6 11:49:32 UTC 2022 - Aleksa Sarai
+
+- Update to Docker 20.10.21-ce. See upstream changelog online at
+  . bsc#1206065
+  bsc#1205375 CVE-2022-36109
+- Rebase patches:
+  * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
+  * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
+  * 0003-PRIVATE-REGISTRY-add-private-registry-mirror-support.patch
+  * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
+  * 0005-bsc1183855-btrfs-Do-not-disable-quota-on-cleanup.patch
+  * 0006-bsc1193930-vendor-update-golang.org-x-crypto.patch
+  * 0007-bsc1200022-fifo.Close-prevent-possible-panic-if-fifo.patch
+- The PRIVATE-REGISTRY patch will now output a warning if it is being used (in
+  preparation for removing the feature).
This feature was never meant to be
+  used by users directly (and is only available in the -kubic/CaaSP version of
+  the package anyway) and thus should not affect any users.
+
+-------------------------------------------------------------------
+Mon Oct 24 09:45:20 UTC 2022 - Dan Čermák
+
+- Fix wrong After: in docker.service, fixes bsc#1188447
+
+-------------------------------------------------------------------
+Thu Sep 29 08:40:35 UTC 2022 - Aleksa Sarai
+
+- Add apparmor-parser as a Recommends to make sure that most users will end up
+  with it installed even if they are primarily running SELinux.
+
+-------------------------------------------------------------------
+Thu Sep 29 07:27:03 UTC 2022 - Fabian Vogt
+
+- Fix syntax of boolean dependency
+
+-------------------------------------------------------------------
+Thu Jul 28 07:42:33 UTC 2022 - Frederic Crozat
+
+- Allow installing container-selinux instead of apparmor-parser.
+
+-------------------------------------------------------------------
+Sun Jul 17 17:06:01 UTC 2022 - Callum Farmer
+
+- Change to using systemd-sysusers
+
+-------------------------------------------------------------------
+Wed Jun 29 12:19:55 UTC 2022 - Aleksa Sarai
+
+- Backport to fix a crash-on-start
+  issue with dockerd. bsc#1200022
+  + 0007-bsc1200022-fifo.Close-prevent-possible-panic-if-fifo.patch
+
+-------------------------------------------------------------------
+Tue Jun 7 07:18:41 UTC 2022 - Aleksa Sarai
+
+- Update to Docker 20.10.17-ce. See upstream changelog online at
+  . bsc#1200145
+- Rebase patches:
+  * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
+  * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
+  * 0003-PRIVATE-REGISTRY-add-private-registry-mirror-support.patch
+  * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
+  * 0005-bsc1183855-btrfs-Do-not-disable-quota-on-cleanup.patch
+  * 0006-bsc1193930-vendor-update-golang.org-x-crypto.patch
+
+-------------------------------------------------------------------
+Fri Apr 29 02:51:43 UTC 2022 - Aleksa Sarai
+
+- Add patch to update golang.org/x/crypto for CVE-2021-43565 and CVE-2022-27191.
+  bsc#1193930 bsc#1197284
+  * 0006-bsc1193930-vendor-update-golang.org-x-crypto.patch
+- Rebase patches:
+  * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
+  * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch
+  * 0003-PRIVATE-REGISTRY-add-private-registry-mirror-support.patch
+  * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
+  * 0005-bsc1183855-btrfs-Do-not-disable-quota-on-cleanup.patch
+
+-------------------------------------------------------------------
+Thu Apr 14 04:09:58 UTC 2022 - Aleksa Sarai
+
+- Update to Docker 20.10.14-ce. See upstream changelog online at
+  . bsc#1197517
+  CVE-2022-24769
+
+-------------------------------------------------------------------
+Mon Jan 17 07:23:01 UTC 2022 - Aleksa Sarai
+
+- Update to Docker 20.10.12-ce. See upstream changelog online at
+  .
+- Remove CHANGELOG.md. It hasn't been maintained since 2017, and all of the
+  changelogs are currently only available online.
+
+-------------------------------------------------------------------
+Thu Nov 18 08:35:37 UTC 2021 - Aleksa Sarai
+
+- Update to Docker 20.10.11-ce. See upstream changelog online at
+  .
bsc#1192814 + bsc#1193273 CVE-2021-41190 +- Rebase patches: + * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch + * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch + * 0003-PRIVATE-REGISTRY-add-private-registry-mirror-support.patch + * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch + * 0005-bsc1183855-btrfs-Do-not-disable-quota-on-cleanup.patch +- Remove upstreamed patches: + - 0006-bsc1190670-seccomp-add-support-for-clone3-syscall-in.patch + +------------------------------------------------------------------- +Wed Oct 6 02:51:16 UTC 2021 - Aleksa Sarai + +- Update to Docker 20.10.9-ce. See upstream changelog online at + . bsc#1191355 + CVE-2021-41089 bsc#1191015 CVE-2021-41091 bsc#1191434 + CVE-2021-41092 bsc#1191334 CVE-2021-41103 bsc#1191121 +- Rebase patches: + * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch + * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch + * 0003-PRIVATE-REGISTRY-add-private-registry-mirror-support.patch + * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch + * 0005-bsc1183855-btrfs-Do-not-disable-quota-on-cleanup.patch + * 0006-bsc1190670-seccomp-add-support-for-clone3-syscall-in.patch +- Switch to Go 1.16.x compiler, in line with upstream. + +------------------------------------------------------------------- +Mon Sep 20 23:59:05 UTC 2021 - Aleksa Sarai + +- Add patch to return ENOSYS for clone3 to avoid breaking glibc again. + bsc#1190670 + + 0006-bsc1190670-seccomp-add-support-for-clone3-syscall-in.patch + +------------------------------------------------------------------- +Mon May 3 13:24:55 UTC 2021 - Aleksa Sarai + +- Add shell requires for the *-completion subpackages. + +------------------------------------------------------------------- +Thu Apr 15 05:23:20 UTC 2021 - Aleksa Sarai + +- Update to Docker 20.10.6-ce. See upstream changelog online at + . bsc#1184768 +- Rebase patches: + * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch + * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch + * 0003-PRIVATE-REGISTRY-add-private-registry-mirror-support.patch + * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch +- Backport upstream fix for btrfs + quotas being removed by Docker regularly. bsc#1183855 bsc#1175081 + + 0005-bsc1183855-btrfs-Do-not-disable-quota-on-cleanup.patch + +------------------------------------------------------------------- +Wed Mar 3 00:49:58 UTC 2021 - Aleksa Sarai + +- Update to Docker 20.10.5-ce. See upstream changelog online at + . bsc#1182947 +- Update runc dependency to 1.0.0~rc93. +- Remove upstreamed patches: + - cli-0001-Rename-bin-md2man-to-bin-go-md2man.patch +- Rebase patches: + * 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch + * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch + * 0003-PRIVATE-REGISTRY-add-private-registry-mirror-support.patch + * 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch +- Switch version to use -ce suffix rather than _ce to avoid confusing other + tools. boo#1182476 + +------------------------------------------------------------------- +Sun Feb 14 06:33:16 UTC 2021 - Aleksa Sarai + +[NOTE: This update was only ever released in SLES and Leap.] + +- It turns out the boo#1178801 libnetwork patch is also broken on Leap, so drop + the patch entirely. 
bsc#1180401 bsc#1182168 + - boo1178801-0001-Add-docker-interfaces-to-firewalld-docker-zone.patch + +------------------------------------------------------------------- +Wed Feb 10 07:40:36 UTC 2021 - Aleksa Sarai + +- Fix incorrect cast in SUSE secrets patches causing warnings on SLES. + * 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch + +------------------------------------------------------------------- +Sat Feb 6 12:36:42 UTC 2021 - Aleksa Sarai + +[NOTE: This update was only ever released in SLES and Leap.] + +- Update Docker to 19.03.15-ce. See upstream changelog in the packaged + /usr/share/doc/packages/docker/CHANGELOG.md. This update includes fixes for + bsc#1181732 (CVE-2021-21284) and bsc#1181730 (CVE-2021-21285). +- Rebase patches: + * bsc1073877-0001-apparmor-clobber-docker-default-profile-on-start.patch +- Only apply the boo#1178801 libnetwork patch to handle firewalld on openSUSE. + It appears that SLES doesn't like the patch. bsc#1180401 + +------------------------------------------------------------------- +Tue Feb 2 13:06:17 UTC 2021 - Aleksa Sarai + +- Update to Docker 20.10.3-ce. See upstream changelog in the packaged + /usr/share/doc/packages/docker/CHANGELOG.md. Fixes bsc#1181732 + (CVE-2021-21284) and bsc#1181730 (CVE-2021-21285). +- Rebase patches on top of 20.10.3-ce. + - 0002-SECRETS-daemon-allow-directory-creation-in-run-secre.patch + + 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch + - 0003-SECRETS-SUSE-implement-SUSE-container-secrets.patch + + 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch + - 0004-PRIVATE-REGISTRY-add-private-registry-mirror-support.patch + + 0003-PRIVATE-REGISTRY-add-private-registry-mirror-support.patch + - 0005-bsc1073877-apparmor-clobber-docker-default-profile-o.patch + + 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch + +------------------------------------------------------------------- +Tue Feb 2 05:28:01 UTC 2021 - Aleksa Sarai + +- Drop docker-runc, docker-test and docker-libnetwork packages. We now just use + the upstream runc package (it's stable enough and Docker no longer pins git + versions). docker-libnetwork is so unstable that it doesn't have any + versioning scheme and so it really doesn't make sense to maintain the project + as a separate package. bsc#1181641 bsc#1181677 +- Remove no-longer-needed patch for packaging now that we've dropped + docker-runc and docker-libnetwork. + - 0001-PACKAGING-revert-Remove-docker-prefix-for-containerd.patch + +------------------------------------------------------------------- +Fri Jan 29 22:55:48 UTC 2021 - Aleksa Sarai + +- Update to Docker 20.10.2-ce. See upstream changelog in the packaged + /usr/share/doc/packages/docker/CHANGELOG.md. bsc#1181594 +- Remove upstreamed patches: + - bsc1122469-0001-apparmor-allow-readby-and-tracedby.patch + - boo1178801-0001-Add-docker-interfaces-to-firewalld-docker-zone.patch +- Add patches to fix build: + + cli-0001-Rename-bin-md2man-to-bin-go-md2man.patch +- Since upstream has changed their source repo (again) we have to rebase all of + our patches. 
While doing this, I've collapsed all patches into one branch
+  per-release and thus all the patches are now just one series:
+  - packaging-0001-revert-Remove-docker-prefix-for-containerd-and-runc-.patch
+  + 0001-PACKAGING-revert-Remove-docker-prefix-for-containerd.patch
+  - secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch
+  + 0002-SECRETS-daemon-allow-directory-creation-in-run-secre.patch
+  - secrets-0002-SUSE-implement-SUSE-container-secrets.patch
+  + 0003-SECRETS-SUSE-implement-SUSE-container-secrets.patch
+  - private-registry-0001-Add-private-registry-mirror-support.patch
+  + 0004-PRIVATE-REGISTRY-add-private-registry-mirror-support.patch
+  - bsc1073877-0001-apparmor-clobber-docker-default-profile-on-start.patch
+  + 0005-bsc1073877-apparmor-clobber-docker-default-profile-o.patch
+
+-------------------------------------------------------------------
+Fri Jan 29 11:54:53 UTC 2021 - Aleksa Sarai
+
+- Re-apply secrets fix for bsc#1065609 which appears to have been lost after it
+  was fixed.
+  * secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch
+  * secrets-0002-SUSE-implement-SUSE-container-secrets.patch
+
+-------------------------------------------------------------------
+Wed Dec 23 06:40:46 UTC 2020 - Aleksa Sarai
+
+- Add Conflicts and Provides for kubic flavour of docker-fish-completion.
+
+-------------------------------------------------------------------
+Mon Dec 21 07:06:53 UTC 2020 - Aleksa Sarai
+
+- Update to Docker 19.03.14-ce. See upstream changelog in the packaged
+  /usr/share/doc/packages/docker/CHANGELOG.md. CVE-2020-15257 bsc#1180243
+  + https://github.com/docker/docker-ce/releases/tag/v19.03.14
+
+-------------------------------------------------------------------
+Mon Dec 14 13:45:56 UTC 2020 - Robert Munteanu
+
+- Enable fish-completion
+
+-------------------------------------------------------------------
+Thu Nov 12 18:36:26 UTC 2020 - Michał Rostecki
+
+- Add a patch which makes Docker compatible with firewalld with
+  nftables backend. Backport of https://github.com/moby/libnetwork/pull/2548
+  (boo#1178801, SLE-16460)
+  * boo1178801-0001-Add-docker-interfaces-to-firewalld-docker-zone.patch
+
+-------------------------------------------------------------------
+Fri Sep 18 08:20:04 UTC 2020 - Aleksa Sarai
+
+- Update to Docker 19.03.13-ce. See upstream changelog in the packaged
+  /usr/share/doc/packages/docker/CHANGELOG.md. bsc#1176708
+
+-------------------------------------------------------------------
+Mon Aug 3 16:58:07 UTC 2020 - Callum Farmer
+
+- Fixes for %_libexecdir changing to /usr/libexec (bsc#1174075)
+
+-------------------------------------------------------------------
+Tue Jun 30 23:00:00 UTC 2020 - Dominique Leuenberger
+
+- Emergency fix: %requires_eq does not work with provide symbols,
+  only effective package names. Convert back to regular Requires.
+
+-------------------------------------------------------------------
+Thu Jun 25 21:54:46 UTC 2020 - Aleksa Sarai
+
+- Update to Docker 19.03.12-ce. See upstream changelog in the packaged
+  /usr/share/doc/packages/docker/CHANGELOG.md.
+- Use Go 1.13 instead of Go 1.14 because Go 1.14 can cause all sorts of
+  spurious errors due to Go returning -EINTR from I/O syscalls much more often
+  (due to Go 1.14's pre-emptive goroutine support).
+  - bsc1172377-0001-unexport-testcase.Cleanup-to-fix-Go-1.14.patch
+- Add BuildRequires for all -git dependencies so that we catch missing
+  dependencies much more quickly.
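
The Go 1.13/1.14 note above describes a concrete failure mode: Go 1.14's asynchronous preemption delivers signals to running goroutines, so slow syscalls can suddenly fail with EINTR. A minimal Go sketch of the conventional retry fix (illustrative only, not code from Docker or its vendored libraries; `readRetry` is a hypothetical helper):

```go
package main

import (
	"fmt"
	"syscall"
)

// readRetry retries a raw read(2) when it is interrupted by a signal.
// Under Go 1.14's preemptive scheduling, SIGURG can interrupt a slow
// syscall, so code that treats EINTR as fatal starts failing where it
// previously appeared to work.
func readRetry(fd int, buf []byte) (int, error) {
	for {
		n, err := syscall.Read(fd, buf)
		if err == syscall.EINTR {
			continue // interrupted by a preemption signal; retry
		}
		return n, err
	}
}

func main() {
	buf := make([]byte, 4096)
	n, err := readRetry(0, buf) // fd 0 = stdin, purely for demonstration
	fmt.Println(n, err)
}
```

Building with Go 1.13, as the entry above does, sidesteps the problem until the affected code learns to retry.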
+ +------------------------------------------------------------------- +Tue Jun 2 08:37:06 UTC 2020 - Aleksa Sarai + +- Update to Docker 19.03.11-ce. See upstream changelog in the packaged + /usr/share/doc/packages/docker/CHANGELOG.md. bsc#1172377 CVE-2020-13401 +- Backport https://github.com/gotestyourself/gotest.tools/pull/169 so that we + can build Docker with Go 1.14 (upstream uses Go 1.13). + + bsc1172377-0001-unexport-testcase.Cleanup-to-fix-Go-1.14.patch + +------------------------------------------------------------------- +Thu Dec 19 15:42:26 UTC 2019 - Dominique Leuenberger + +- BuildRequire pkgconfig(libsystemd) instead of systemd-devel: + Allow OBS to shortcut through the -mini flavors. + +------------------------------------------------------------------- +Thu Dec 12 13:27:21 UTC 2019 - Aleksa Sarai + +- Add backport of https://github.com/docker/docker/pull/39121. bsc#1122469 + + bsc1122469-0001-apparmor-allow-readby-and-tracedby.patch + +------------------------------------------------------------------- +Wed Dec 11 23:55:40 UTC 2019 - Aleksa Sarai + +- Support older SLE systems which don't have "usermod -w -v". + +------------------------------------------------------------------- +Mon Nov 18 04:46:31 UTC 2019 - Aleksa Sarai + +- Update to Docker 19.03.5-ce. See upstream changelog in the packaged + /usr/share/doc/packages/docker/CHANGELOG.md. bsc#1158590 bsc#1157330 + +------------------------------------------------------------------- +Sat Oct 19 11:21:03 UTC 2019 - Aleksa Sarai + +- Update to Docker 19.03.4-ce. See upstream changelog in the packaged + /usr/share/doc/packages/docker/CHANGELOG.md. + +------------------------------------------------------------------- +Tue Oct 8 21:47:56 UTC 2019 - Aleksa Sarai + +- Drop containerd.service workaround (we've released enough versions without + containerd.service -- there's no need to support package upgrades that old). +- Update to Docker 19.03.3-ce. See upstream changelog in the packaged + /usr/share/doc/packages/docker/CHANGELOG.md. bsc#1153367 + +------------------------------------------------------------------- +Tue Oct 1 23:54:25 UTC 2019 - Aleksa Sarai + +- Update to Docker 19.03.2-ce. See upstream changelog in the packaged + /usr/share/doc/packages/docker/CHANGELOG.md. bsc#1150397 + +------------------------------------------------------------------- +Sun Sep 22 17:41:56 UTC 2019 - Chris Coutinho + +- Fix zsh-completion (docker -> _docker) + +------------------------------------------------------------------- +Tue Jul 30 05:14:44 UTC 2019 - Aleksa Sarai + +- Fix default installation such that --userns-remap=default works properly + (this appears to be an upstream regression, where --userns-remap=default + doesn't auto-create the group and results in an error on-start). boo#1143349 + +------------------------------------------------------------------- +Fri Jul 26 12:49:18 UTC 2019 - Aleksa Sarai + +- Update to Docker 19.03.1-ce. See upstream changelog in the packaged + /usr/share/doc/packages/docker/CHANGELOG.md. CVE-2019-14271 + +------------------------------------------------------------------- +Mon Jul 22 22:13:30 UTC 2019 - Aleksa Sarai + +- Update to Docker 19.03.0-ce. See upstream changelog in the packaged + /usr/share/doc/packages/docker/CHANGELOG.md. 
bsc#1142413
+- Remove upstreamed patches:
+  - bsc1001161-0001-oci-include-the-domainname-in-kernel.domainname.patch
+  - bsc1001161-0002-cli-add-a-separate-domainname-flag.patch
+  - bsc1047218-0001-man-obey-SOURCE_DATE_EPOCH-when-generating-man-pages.patch
+  - bsc1128746-0001-integration-cli-don-t-build-test-images-if-they-alre.patch
+- Rebase patches:
+  * bsc1073877-0001-apparmor-clobber-docker-default-profile-on-start.patch
+  * packaging-0001-revert-Remove-docker-prefix-for-containerd-and-runc-.patch
+  * private-registry-0001-Add-private-registry-mirror-support.patch
+  * secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch
+  * secrets-0002-SUSE-implement-SUSE-container-secrets.patch
+
+-------------------------------------------------------------------
+Wed Jul 17 23:15:33 UTC 2019 - Aleksa Sarai
+
+- Move bash-completion to correct location.
+- Update to Docker 18.09.8-ce. See upstream changelog in the packaged
+  /usr/share/doc/packages/docker/CHANGELOG.md.
+  * Includes fixes for CVE-2019-13509 bsc#1142160.
+
+-------------------------------------------------------------------
+Fri Jun 28 01:21:19 UTC 2019 - Aleksa Sarai
+
+- Update to Docker 18.09.7-ce. See upstream changelog in the packaged
+  /usr/share/doc/packages/docker/CHANGELOG.md. bsc#1139649
+- Remove upstreamed patches:
+  - CVE-2018-15664.patch
+
+-------------------------------------------------------------------
+Thu Jun 27 07:12:57 UTC 2019 - Aleksa Sarai
+
+- Use %config(noreplace) for /etc/docker/daemon.json. bsc#1138920
+
+-------------------------------------------------------------------
+Fri Jun 7 08:36:17 UTC 2019 - Aleksa Sarai
+
+- Add patch for CVE-2018-15664. bsc#1096726
+  + CVE-2018-15664.patch
+
+-------------------------------------------------------------------
+Mon May 6 18:25:14 UTC 2019 - Aleksa Sarai
+
+- Update to Docker 18.09.6-ce. See upstream changelog in the packaged
+  /usr/share/doc/packages/docker/CHANGELOG.md.
+- Rebase patches:
+  * bsc1128746-0001-integration-cli-don-t-build-test-images-if-they-alre.patch
+
+-------------------------------------------------------------------
+Fri May 3 14:02:46 UTC 2019 - Aleksa Sarai
+
+- Update to Docker 18.09.5-ce. See upstream changelog in the packaged
+  /usr/share/doc/packages/docker/CHANGELOG.md. bsc#1128376 boo#1134068
+- Rebase patches:
+  * bsc1001161-0001-oci-include-the-domainname-in-kernel.domainname.patch
+  * bsc1001161-0002-cli-add-a-separate-domainname-flag.patch
+  * bsc1047218-0001-man-obey-SOURCE_DATE_EPOCH-when-generating-man-pages.patch
+  * bsc1128746-0001-integration-cli-don-t-build-test-images-if-they-alre.patch
+  * packaging-0001-revert-Remove-docker-prefix-for-containerd-and-runc-.patch
+  * private-registry-0001-Add-private-registry-mirror-support.patch
+  * secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch
+  * secrets-0002-SUSE-implement-SUSE-container-secrets.patch
+- Updated patch name:
+  + bsc1073877-0001-apparmor-clobber-docker-default-profile-on-start.patch
+  - bsc1073877-0002-apparmor-clobber-docker-default-profile-on-start.patch
+
+-------------------------------------------------------------------
+Fri Mar 22 09:19:28 UTC 2019 - Sascha Grunert
+
+- Update to Docker 18.09.3-ce. See upstream changelog in the packaged
+  /usr/share/doc/packages/docker/CHANGELOG.md.
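
For context on the --userns-remap=default entry above: with userns remapping the daemon maps in-container IDs onto a subordinate ID range owned by the dockremap user and group (the group that the noted regression failed to auto-create). A small Go sketch of the arithmetic, with illustrative range values rather than anything read from a real /etc/subuid:

```go
package main

import "fmt"

// subIDRange mirrors one line of /etc/subuid or /etc/subgid, e.g.
// "dockremap:100000:65536". With --userns-remap=default the daemon
// expects a "dockremap" user and group to own such a range.
type subIDRange struct {
	Base int // first host ID in the range
	Size int // number of IDs in the range
}

// hostID maps an in-container ID onto the host using the range.
func (r subIDRange) hostID(containerID int) (int, error) {
	if containerID < 0 || containerID >= r.Size {
		return 0, fmt.Errorf("id %d outside subordinate range", containerID)
	}
	return r.Base + containerID, nil
}

func main() {
	r := subIDRange{Base: 100000, Size: 65536}
	h, _ := r.hostID(0) // container root
	fmt.Println(h)      // 100000: container UID 0 is unprivileged on the host
}
```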
+
+-------------------------------------------------------------------
+Sun Mar 10 21:12:09 UTC 2019 - Aleksa Sarai
+
+- docker-test: improvements to test packaging (we don't need to ship around the
+  entire source tree, and we also need to build the born-again integration/
+  tests which contain a suite-per-directory). We also need a new patch which
+  fixes the handling of *-test images. bsc#1128746
+  + bsc1128746-0001-integration-cli-don-t-build-test-images-if-they-alre.patch
+
+-------------------------------------------------------------------
+Tue Feb 26 09:39:57 UTC 2019 - Michal Jura
+
+- Move daemon.json file to /etc/docker directory, bsc#1114832
+
+-------------------------------------------------------------------
+Sat Feb 9 13:54:03 UTC 2019 - Aleksa Sarai
+
+- Update shell completion to use Group: System/Shells.
+
+-------------------------------------------------------------------
+Wed Feb 6 14:37:43 UTC 2019 - Michal Jura
+
+- Add daemon.json file with log rotation configuration, bsc#1114832
+
+-------------------------------------------------------------------
+Tue Feb 5 11:24:02 UTC 2019 - Aleksa Sarai
+
+- Update to Docker 18.09.1-ce. See upstream changelog in the packaged
+  /usr/share/doc/packages/docker/CHANGELOG.md. bsc#1124308
+  * Includes fix for CVE-2018-10892 bsc#1100331.
+  * Includes fix for CVE-2018-20699 bsc#1121768.
+- Remove upstreamed patches.
+  - bsc1073877-0001-apparmor-allow-receiving-of-signals-from-docker-kill.patch
+
+-------------------------------------------------------------------
+Fri Jan 11 09:57:32 UTC 2019 - Sascha Grunert
+
+- Disable leap based builds for kubic flavor. bsc#1121412
+
+-------------------------------------------------------------------
+Wed Dec 19 19:28:47 UTC 2018 - clee@suse.com
+
+- Update go requirements to >= go1.10.6 to fix
+  * bsc#1118897 CVE-2018-16873
+    go#29230 cmd/go: remote command execution during "go get -u"
+  * bsc#1118898 CVE-2018-16874
+    go#29231 cmd/go: directory traversal in "go get" via curly braces in import paths
+  * bsc#1118899 CVE-2018-16875
+    go#29233 crypto/x509: CPU denial of service
+
+-------------------------------------------------------------------
+Tue Dec 18 10:10:06 UTC 2018 - Aleksa Sarai
+
+- Handle build breakage due to missing 'export GOPATH' (caused by resolution of
+  boo#1119634). I believe Docker is one of the only packages with this problem.
+
+-------------------------------------------------------------------
+Mon Dec 3 16:14:22 UTC 2018 - Aleksa Sarai
+
+- Add backports of https://github.com/docker/docker/pull/37302 and
+  https://github.com/docker/cli/pull/1130, which allow for users to explicitly
+  specify the NIS domainname of a container. bsc#1001161
+  + bsc1001161-0001-oci-include-the-domainname-in-kernel.domainname.patch
+  + bsc1001161-0002-cli-add-a-separate-domainname-flag.patch
+
+-------------------------------------------------------------------
+Thu Nov 29 09:41:11 UTC 2018 - Aleksa Sarai
+
+- Update docker.service to match upstream and avoid rlimit problems.
+  bsc#1112980
+- Upgrade to Docker 18.09.0-ce. See upstream changelog in the packaged
+  /usr/share/doc/packages/docker/CHANGELOG.md. boo#1115464 bsc#1118990
+- Add revert of an upstream patch to fix docker-* handling.
+ + packaging-0001-revert-Remove-docker-prefix-for-containerd-and-runc-.patch +- Rebase patches: + * bsc1047218-0001-man-obey-SOURCE_DATE_EPOCH-when-generating-man-pages.patch + * bsc1073877-0001-apparmor-allow-receiving-of-signals-from-docker-kill.patch + * bsc1073877-0002-apparmor-clobber-docker-default-profile-on-start.patch + * private-registry-0001-Add-private-registry-mirror-support.patch + * secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch + * secrets-0002-SUSE-implement-SUSE-container-secrets.patch +- Remove upstreamed patches: + - bsc1100727-0001-build-add-buildmode-pie.patch + +------------------------------------------------------------------- +Mon Oct 8 06:41:21 UTC 2018 - Valentin Rothberg + +- Reduce the disk footprint by recommending git-core instead of + hard requiring it. + bsc#1108038 + +------------------------------------------------------------------- +Tue Sep 4 08:32:43 UTC 2018 - rbrown@suse.com + +- ExcludeArch i586 for entire docker-kubic flavour + +------------------------------------------------------------------- +Tue Sep 4 07:32:47 UTC 2018 - rbrown@suse.com + +- ExcludeArch i586 for docker-kubic-kubeadm-criconfig subpackage + +------------------------------------------------------------------- +Fri Aug 24 08:17:41 UTC 2018 - asarai@suse.com + +- Add patch to make package reproducible, which is a backport of + https://github.com/docker/cli/pull/1306. boo#1047218 + + bsc1047218-0001-man-obey-SOURCE_DATE_EPOCH-when-generating-man-pages.patch + +------------------------------------------------------------------- +Wed Aug 22 09:54:57 UTC 2018 - asarai@suse.com + +- Upgrade to docker-ce v18.06.1-ce. bsc#1102522 bsc#1113313 + Upstream changelog: + https://github.com/docker/docker-ce/releases/tag/v18.06.1-ce +- Remove patches that were merged upstream: + - bsc1102522-0001-18.06-disable-containerd-CRI-plugin.patch + +------------------------------------------------------------------- +Tue Aug 21 09:50:01 UTC 2018 - asarai@suse.com + +- Add a backport of https://github.com/docker/engine/pull/29 for the 18.06.0-ce + upgrade. This is a potential security issue (the CRI plugin was enabled by + default, which listens on a TCP port bound to 0.0.0.0) that will be fixed + upstream in the 18.06.1-ce upgrade. bsc#1102522 + + bsc1102522-0001-18.06-disable-containerd-CRI-plugin.patch + +------------------------------------------------------------------- +Tue Aug 21 09:39:57 UTC 2018 - rbrown@suse.com + +- Kubic: Make crio default, docker as alternative runtime + (boo#1104821) +- Provide kubernetes CRI config with docker-kubic-kubeadm-criconfig + subpackage + +------------------------------------------------------------------- +Thu Aug 16 02:00:31 UTC 2018 - asarai@suse.com + +- Merge -kubic packages back into the main Virtualization:containers packages. + This is done using _multibuild to add a "kubic" flavour, which is then used + to conditionally compile patches and other kubic-specific features. + bsc#1105000 +- Rework docker-rpmlintrc with the new _multibuild setup. + +------------------------------------------------------------------- +Wed Aug 1 09:40:59 UTC 2018 - asarai@suse.com + +- Enable seccomp support on SLE12, since libseccomp is now a new enough vintage + to work with Docker and containerd. fate#325877 + +------------------------------------------------------------------- +Tue Jul 31 09:48:16 UTC 2018 - asarai@suse.com + +- Upgrade to docker-ce v18.06.0-ce. 
bsc#1102522
+- Remove systemd-service dependency on containerd, which is now being started
+  by dockerd to align with upstream defaults.
+- Removed the following patches as they are merged upstream:
+  - bsc1021227-0001-pkg-devmapper-dynamically-load-dm_task_deferred_remo.patch
+  - bsc1055676-0001-daemon-oci-obey-CL_UNPRIVILEGED-for-user-namespaced-.patch
+- Rebased the following patches:
+  * bsc1073877-0001-apparmor-allow-receiving-of-signals-from-docker-kill.patch
+  * bsc1073877-0002-apparmor-clobber-docker-default-profile-on-start.patch
+  * bsc1100727-0001-build-add-buildmode-pie.patch
+  * secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch
+  * secrets-0002-SUSE-implement-SUSE-container-secrets.patch
+
+-------------------------------------------------------------------
+Mon Jul 30 09:44:47 UTC 2018 - asarai@suse.com
+
+- Build the client binary with -buildmode=pie to fix issues on POWER.
+  bsc#1100727
+  + bsc1100727-0001-build-add-buildmode-pie.patch
+
+-------------------------------------------------------------------
+Fri Jun 29 08:35:56 UTC 2018 - asarai@suse.com
+
+- Update the AppArmor patchset again to fix a separate issue where changed
+  AppArmor profiles don't actually get applied on Docker daemon reboot.
+  bsc#1099277
+  * bsc1073877-0001-apparmor-allow-receiving-of-signals-from-docker-kill.patch
+  + bsc1073877-0002-apparmor-clobber-docker-default-profile-on-start.patch
+
+-------------------------------------------------------------------
+Tue Jun 5 11:24:35 UTC 2018 - asarai@suse.com
+
+- Update the AppArmor patch so that signal mediation also works for signals
+  between in-container processes. bsc#1073877
+  * bsc1073877-0001-apparmor-allow-receiving-of-signals-from-docker-kill.patch
+
+-------------------------------------------------------------------
+Tue Jun 5 08:41:07 UTC 2018 - dcassany@suse.com
+
+- Make use of %license macro
+
+-------------------------------------------------------------------
+Tue Jun 5 06:38:40 UTC 2018 - asarai@suse.com
+
+- Remove 'go test' from %check section, as it has only ever caused us problems
+  and hasn't (as far as I remember) ever caught a release-blocking issue. Smoke
+  testing has been far more useful. boo#1095817
+
+-------------------------------------------------------------------
+Tue May 29 08:10:48 UTC 2018 - asarai@suse.com
+
+- Update secrets patch to not log incorrect warnings when attempting to inject
+  non-existent host files. bsc#1065609
+  * secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch
+  * secrets-0002-SUSE-implement-SUSE-container-secrets.patch
+
+-------------------------------------------------------------------
+Wed May 16 10:12:56 UTC 2018 - jmassaguerpla@suse.com
+
+- Review Obsoletes to fix bsc#1080978
+
+-------------------------------------------------------------------
+Thu Apr 12 12:49:25 UTC 2018 - fcastelli@suse.com
+
+- Put docker under the podruntime slice. This is the recommended
+  deployment to allow fine resource control on Kubernetes.
+  bsc#1086185
+
+-------------------------------------------------------------------
+Tue Apr 10 09:25:43 UTC 2018 - mmeister@suse.com
+
+- Add patch to handle AppArmor changes that make 'docker kill' stop working.
+  bsc#1073877 boo#1089732
+  + bsc1073877-0001-apparmor-allow-receiving-of-signals-from-docker-kill.patch
+
+-------------------------------------------------------------------
+Fri Apr 6 04:21:28 UTC 2018 - asarai@suse.com
+
+- Fix manpage generation breaking ppc64le builds due to a missing
+  -buildmode=pie.
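
The bsc#1099277 entry above is easier to follow with the intent spelled out: rather than loading the default AppArmor profile only when it is absent, the patched daemon "clobbers" it, regenerating and reloading it on every start, so profile changes shipped in a package update actually apply after a restart. A hedged Go sketch of that idea (not the patch itself; the profile path is illustrative):

```go
package main

import (
	"fmt"
	"os/exec"
)

// reloadDefaultProfile unconditionally replaces the loaded profile.
// apparmor_parser -r replaces an already-loaded profile and -K skips
// the policy cache, so a stale profile cannot linger across restarts.
func reloadDefaultProfile(path string) error {
	out, err := exec.Command("apparmor_parser", "-Kr", path).CombinedOutput()
	if err != nil {
		return fmt.Errorf("apparmor_parser: %v: %s", err, out)
	}
	return nil
}

func main() {
	// Illustrative path; the real profile is generated by the daemon.
	if err := reloadDefaultProfile("/etc/apparmor.d/docker"); err != nil {
		fmt.Println(err)
	}
}
```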
+
+-------------------------------------------------------------------
+Wed Apr 4 12:27:29 UTC 2018 - vrothberg@suse.com
+
+- Compile and install all manpages.
+  bsc#1085117
+
+-------------------------------------------------------------------
+Tue Mar 27 10:13:41 UTC 2018 - asarai@suse.com
+
+- Add requirement for catatonit, which provides a docker-init implementation.
+  fate#324652 bsc#1085380
+
+-------------------------------------------------------------------
+Thu Mar 8 13:14:54 UTC 2018 - vrothberg@suse.com
+
+- Fix private-registry-0001-Add-private-registry-mirror-support.patch to
+  deal correctly with TLS configs of 3rd party registries.
+  fix bsc#1084533
+
+-------------------------------------------------------------------
+Tue Feb 13 10:45:58 UTC 2018 - asarai@suse.com
+
+- Update patches to be sourced from https://github.com/suse/docker-ce (which
+  are based on the upstream docker/docker-ce repo). The reason for this change
+  (though it is functionally identical to the old patches) is so that public
+  patch maintenance is much simpler.
+  * bsc1021227-0001-pkg-devmapper-dynamically-load-dm_task_deferred_remo.patch
+  * bsc1055676-0001-daemon-oci-obey-CL_UNPRIVILEGED-for-user-namespaced-.patch
+  * private-registry-0001-Add-private-registry-mirror-support.patch
+  * secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch
+  * secrets-0002-SUSE-implement-SUSE-container-secrets.patch
+
+-------------------------------------------------------------------
+Mon Feb 12 10:52:33 UTC 2018 - rbrown@suse.com
+
+- Add ${version} to equivalent non-kubic package provides
+
+-------------------------------------------------------------------
+Thu Feb 8 12:34:51 UTC 2018 - rbrown@suse.com
+
+- Add Provides for equivalent non-kubic packages
+
+-------------------------------------------------------------------
+Tue Jan 30 12:27:44 UTC 2018 - vrothberg@suse.com
+
+- Disable all tests for docker/client and docker/pkg/discovery. The unit tests
+  of those packages reproducibly broke the builds in IBS.
+
+-------------------------------------------------------------------
+Mon Jan 29 14:39:02 UTC 2018 - vrothberg@suse.com
+
+- Disable flaky tests in github.com/docker/docker/pkg/discovery/kv.
+
+-------------------------------------------------------------------
+Fri Jan 26 07:15:53 UTC 2018 - vrothberg@suse.com
+
+- Add patch to support mirroring of private/non-upstream registries. As soon as
+  the upstream PR (https://github.com/moby/moby/pull/34319) is merged, this
+  patch will be replaced by the backported one from upstream.
+  + private-registry-0001-Add-private-registry-mirror-support.patch
+  fix bsc#1074971
+
+-------------------------------------------------------------------
+Fri Jan 19 14:12:32 UTC 2018 - asarai@suse.com
+
+- Add Obsoletes: docker-image-migrator, as the tool is no longer needed and
+  we've pretty much removed it from everywhere except the containers module.
+  bsc#1069758
+
+-------------------------------------------------------------------
+Fri Jan 19 07:48:10 UTC 2018 - vrothberg@suse.com
+
+- Remove requirement on bridge-utils, which has been replaced by libnetwork in
+  Docker. bsc#1072798
+
+-------------------------------------------------------------------
+Mon Dec 18 12:32:35 UTC 2017 - asarai@suse.com
+
+- Update to Docker v17.09.1_ce (bsc#1069758).
Upstream changelog:
+  https://github.com/docker/docker-ce/releases/tag/v17.09.1-ce
+- Removed patches (merged upstream):
+  - bsc1045628-0001-devicemapper-remove-container-rootfs-mountPath-after.patch
+  - bsc1066210-0001-vendor-update-to-github.com-vbatts-tar-split-v0.10.2.patch
+  - bsc1066801-0001-oci-add-proc-scsi-to-masked-paths.patch
+
+-------------------------------------------------------------------
+Mon Dec 18 12:32:35 UTC 2017 - asarai@suse.com
+
+- Update to Docker v17.09.0_ce. Upstream changelog:
+  https://github.com/docker/docker-ce/releases/tag/v17.09.0-ce
+- Rebased patches:
+  * bsc1021227-0001-pkg-devmapper-dynamically-load-dm_task_deferred_remo.patch
+  * bsc1045628-0001-devicemapper-remove-container-rootfs-mountPath-after.patch
+  * bsc1055676-0001-daemon-oci-obey-CL_UNPRIVILEGED-for-user-namespaced-.patch
+  * secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch
+  * secrets-0002-SUSE-implement-SUSE-container-secrets.patch
+- Removed patches (merged upstream):
+  - bsc1064781-0001-Allow-to-override-build-date.patch
+
+-------------------------------------------------------------------
+Tue Dec 5 10:58:07 UTC 2017 - asarai@suse.com
+
+- Add a patch to dynamically probe whether libdevmapper supports
+  dm_task_deferred_remove. This is necessary because we build the containers
+  module on a SLE12 base, but later SLE versions have libdevmapper support.
+  This should not affect openSUSE, as all openSUSE versions have a new enough
+  libdevmapper. Backport of https://github.com/moby/moby/pull/35518.
+  bsc#1021227 bsc#1029320 bsc#1058173
+  + bsc1021227-0001-pkg-devmapper-dynamically-load-dm_task_deferred_remo.patch
+
+-------------------------------------------------------------------
+Mon Dec 4 12:22:29 UTC 2017 - asarai@suse.com
+
+- Fix up the ordering of tests in docker.spec. This is to keep things easier to
+  backport into the SLE package.
+
+-------------------------------------------------------------------
+Thu Nov 30 10:15:20 UTC 2017 - asarai@suse.com
+
+- Include secrets fix to handle "old" containers that have orphaned secret
+  data. It's not clear why Docker caches these secrets, but fix the problem by
+  trashing the references manually. bsc#1057743
+  * secrets-0002-SUSE-implement-SUSE-container-secrets.patch
+
+-------------------------------------------------------------------
+Thu Nov 23 13:48:08 UTC 2017 - rbrown@suse.com
+
+- Replace references to /var/adm/fillup-templates with new
+  %_fillupdir macro (boo#1069468)
+
+-------------------------------------------------------------------
+Tue Nov 14 22:39:56 UTC 2017 - asarai@suse.com
+
+- Remove migration code for the v1.9.x -> v1.10.x migration. This has been
+  around for a while, and we no longer support migrating from such an old
+  version "nicely". Docker still has migration code that will run on
+  first-boot; we are merely removing all of the "nice" warnings which tell
+  users how to avoid issues during an upgrade that occurred more than a year
+  ago.
+- Drop un-needed files:
+  - docker-plugin-message.txt
+  - docker-update-message.txt
+
+-------------------------------------------------------------------
+Tue Nov 7 16:47:01 UTC 2017 - asarai@suse.com
+
+- Add a backport of https://github.com/moby/moby/pull/35424, which fixes a
+  security issue where a maliciously crafted image could be used to crash a
+  Docker daemon.
bsc#1066210 CVE-2017-14992 + + bsc1066210-0001-vendor-update-to-github.com-vbatts-tar-split-v0.10.2.patch + +------------------------------------------------------------------- +Tue Nov 7 09:00:31 UTC 2017 - asarai@suse.com + +- Add a backport of https://github.com/moby/moby/pull/35399, which fixes a + security issue where a Docker container (with a disabled AppArmor profile) + could write to /proc/scsi/... and subsequently DoS the host. bsc#1066801 + CVE-2017-16539 + + bsc1066801-0001-oci-add-proc-scsi-to-masked-paths.patch + +------------------------------------------------------------------- +Tue Oct 24 06:50:29 UTC 2017 - asarai@suse.com + +- Correctly set `docker version` information, including the version, git + commit, and SOURCE_DATE_EPOCH (requires a backport). This should + *effectively* make Docker builds reproducible, with minimal cost. boo#1064781 + + bsc1064781-0001-Allow-to-override-build-date.patch + +------------------------------------------------------------------- +Mon Oct 16 11:06:22 UTC 2017 - asarai@suse.com + +- Add backport of https://github.com/moby/moby/pull/35205. This used to be + fixed in docker-runc, but we're moving it here after upstream discussion. + bsc#1055676 + + bsc1055676-0001-daemon-oci-obey-CL_UNPRIVILEGED-for-user-namespaced-.patch + +------------------------------------------------------------------- +Mon Oct 9 11:36:59 UTC 2017 - asarai@suse.com + +- Update to Docker v17.07.0_ce. Upstream changelog: + https://github.com/docker/docker-ce/releases/tag/v17.06.0-ce + https://github.com/docker/docker-ce/releases/tag/v17.07.0-ce +- Removed no-longer needed patches. + - bsc1037436-0001-client-check-tty-before-creating-exec-job.patch + - bsc1037607-0001-apparmor-make-pkg-aaparser-work-on-read-only-root.patch + - integration-cli-fix-TestInfoEnsureSucceeds.patch +- Added backport of https://github.com/moby/moby/pull/34573. bsc#1045628 + + bsc1045628-0001-devicemapper-remove-container-rootfs-mountPath-after.patch +- Rewrite secrets patches to correctly handle directories in a way that doesn't + cause errors when starting new containers. + * secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch + * secrets-0002-SUSE-implement-SUSE-container-secrets.patch + +------------------------------------------------------------------- +Mon Oct 2 08:12:17 UTC 2017 - vrothberg@suse.com + +- Fix bsc#1059011 + + The systemd service helper script used a timeout of 60 seconds to + start the daemon, which is insufficient in cases where the daemon + takes longer to start. Instead, set the service type from 'simple' to + 'notify' and remove the now superfluous helper script. + +------------------------------------------------------------------- +Wed Sep 27 15:04:19 UTC 2017 - jmassaguerpla@suse.com + +- fix bsc#1057743: Add a Requires: fix_bsc_1057743 which is provided by the + newer version of docker-libnetwork. This is necessary because of a versioning + bug we found in bsc#1057743. + +------------------------------------------------------------------- +Fri Sep 15 15:32:49 UTC 2017 - jmassaguerpla@suse.com + +- fix /var/adm/update-message/docker file name to be + /var/adm/update-message/docker-%{version}-%{release} + +------------------------------------------------------------------- +Wed Sep 6 11:42:31 UTC 2017 - asarai@suse.com + +- devicemapper: add patch to make the dm storage driver remove a container's + rootfs mountpoint before attempting to do libdm operations on it. This helps + avoid complications when live mounts will leak into containers. 
Backport of
+  https://github.com/moby/moby/pull/34573. bsc#1045628
+  + bsc1045628-0001-devicemapper-remove-container-rootfs-mountPath-after.patch
+
+-------------------------------------------------------------------
+Wed Aug 30 14:58:52 UTC 2017 - asarai@suse.com
+
+- Fix a regression in our SUSE secrets patches, which caused the copied files
+  to not carry the correct {uid,gid} mapping when using user namespaces. This
+  would not cause any bugs (SUSEConnect does the right thing anyway) but it's
+  possible some programs would not treat the files correctly. This is
+  tangentially related to bsc#1055676.
+  * secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch
+  * secrets-0002-SUSE-implement-SUSE-container-secrets.patch
+
+-------------------------------------------------------------------
+Wed Aug 2 13:37:16 UTC 2017 - asarai@suse.com
+
+- Use -buildmode=pie for tests and binary build. bsc#1048046 bsc#1051429
+
+-------------------------------------------------------------------
+Wed Jul 19 18:12:26 UTC 2017 - jmassaguerpla@suse.com
+
+- enable deferred removal for sle12sp2 and newer (and openSUSE
+  equivalent). fix bsc#1021227
+
+-------------------------------------------------------------------
+Wed Jul 19 17:17:04 UTC 2017 - jmassaguerpla@suse.com
+
+- enable libseccomp on sle12sp2 and newer, 42.2 and newer
+  fix bsc#1028638 - docker: conditional filtering not supported on
+  libseccomp for sle12
+
+-------------------------------------------------------------------
+Tue Jul 11 10:50:12 UTC 2017 - jmassaguerpla@suse.com
+
+- add SuSEfirewall2.service to the After clause in docker.service
+  in order to fix bsc#1046024
+
+-------------------------------------------------------------------
+Fri Jul 7 14:53:59 UTC 2017 - thipp@suse.de
+
+- fix path to docker-runc in systemd service file
+
+-------------------------------------------------------------------
+Thu Jul 6 14:18:29 UTC 2017 - thipp@suse.de
+
+- change dependency to docker-runc
+
+-------------------------------------------------------------------
+Mon Jun 19 10:54:36 UTC 2017 - jmassaguerpla@suse.com
+
+- Fix bsc#1029630: docker does not wait for lvm on system startup
+
+I added "lvm2-monitor.service" as an "After dependency" of the docker systemd
+unit.
+
+-------------------------------------------------------------------
+Tue May 30 11:29:45 UTC 2017 - jmassaguerpla@suse.com
+
+- Fix bsc#1032287: missing docker systemd configuration
+
+-------------------------------------------------------------------
+Mon May 29 11:08:44 UTC 2017 - asarai@suse.com
+
+- Update SUSE secrets patch to correctly handle restarting of containers.
+  + secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch
+  + secrets-0002-SUSE-implement-SUSE-container-secrets.patch
+
+-------------------------------------------------------------------
+Wed May 17 14:41:29 UTC 2017 - asarai@suse.com
+
+- Fix bsc#1037607 which was causing read-only issues on Kubic; this is a
+  backport of https://github.com/moby/moby/pull/33250.
+  + bsc1037607-0001-apparmor-make-pkg-aaparser-work-on-read-only-root.patch
+
+-------------------------------------------------------------------
+Thu May 11 07:36:32 UTC 2017 - tchvatal@suse.com
+
+- Fix bsc#1038476 warning about non-executable docker
+  * Simply verify we have the binary prior to using it; this might happen if
+    someone had docker installed and then removed it and installed it from
+    scratch again
+
+-------------------------------------------------------------------
+Wed May 10 13:54:44 UTC 2017 - asarai@suse.com
+
+- Add a partial fix for boo#1038493.
+- Fixed bsc#1037436 where execids were being leaked due to bad error handling.
+  This is a backport of https://github.com/docker/cli/pull/52.
+  + bsc1037436-0001-client-check-tty-before-creating-exec-job.patch
+
+-------------------------------------------------------------------
+Thu May 4 19:03:40 UTC 2017 - jmassaguerpla@suse.com
+
+- Fix golang requirements in the subpackages
+
+-------------------------------------------------------------------
+Mon May 1 07:57:35 UTC 2017 - fcastelli@suse.com
+
+- Update golang build requirements to use golang(API) symbol: this is
+  needed to solve a conflict between multiple versions of Go being available
+
+-------------------------------------------------------------------
+Tue Apr 18 15:38:11 UTC 2017 - jmassaguerpla@suse.com
+
+- Fix secrets-0002-SUSE-implement-SUSE-container-secrets.patch:
+  substitute docker/distribution/digest by opencontainers/digest
+
+-------------------------------------------------------------------
+Thu Apr 13 14:34:35 UTC 2017 - jmassaguerpla@suse.com
+
+- Update to version 17.04.0-ce (fix bsc#1034053)
+
+- Patches removed because they have been merged into this version:
+  * pr31549-cmd-docker-fix-TestDaemonCommand.patch
+  * pr31773-daemon-also-ensureDefaultApparmorProfile-in-exec-pat.patch
+- Patches rebased:
+  * integration-cli-fix-TestInfoEnsureSucceeds.patch
+- Build man pages for all archs (bsc#953182)
+- Containers cannot resolve DNS if docker host uses 127.0.0.1 as resolver (bsc#1034063)
+
+see /usr/share/doc/packages/docker/CHANGELOG.md
+
+-------------------------------------------------------------------
+Wed Apr 12 09:54:18 UTC 2017 - jmassaguerpla@suse.com
+
+- Make sure this is being built with go 1.7
+
+-------------------------------------------------------------------
+Wed Apr 12 09:14:35 UTC 2017 - jmassaguerpla@suse.com
+
+- remove the go_arches macro because we are using go1.7 which
+  is available in all archs
+
+- remove gcc specific patches
+  * gcc-go-patches.patch
+  * netlink_netns_powerpc.patch
+  * boltdb_bolt_add_brokenUnaligned.patch
+
+-------------------------------------------------------------------
+Wed Apr 12 07:58:08 UTC 2017 - asarai@suse.com
+
+- Enable Delegate=yes, since systemd will safely ignore lvalues it doesn't
+  understand.
+
+-------------------------------------------------------------------
+Tue Apr 11 11:49:05 UTC 2017 - asarai@suse.com
+
+- Update SUSE secrets patch to handle boo#1030702.
+  * secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch
+  * secrets-0002-SUSE-implement-SUSE-container-secrets.patch
+
+-------------------------------------------------------------------
+Tue Apr 11 08:28:33 UTC 2017 - mmeister@suse.com
+
+- Fix (bsc#1032644)
+
+  Change lvm2 from Requires to Recommends
+
+  Docker usually uses a default storage driver, when it's not configured
+  explicitly. This default driver then depends on the underlying
+  system and gets chosen during installation.
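
The lvm2 entry above alludes to how a default storage driver is picked when none is configured: a fixed preference order is probed and devicemapper (the driver that needs lvm2) is only one possible outcome. A Go sketch of that kind of selection; the list and the `supported` probe are illustrative, not the exact logic of any particular Docker release:

```go
package main

import "fmt"

// pickGraphDriver returns the first driver in a preference order that
// the system supports, falling back to vfs, which always works but
// offers no copy-on-write.
func pickGraphDriver(supported func(name string) bool) string {
	for _, d := range []string{"overlay2", "btrfs", "devicemapper"} {
		if supported(d) {
			return d
		}
	}
	return "vfs" // always-available fallback
}

func main() {
	// Pretend only overlay2 probes successfully on this system.
	driver := pickGraphDriver(func(name string) bool { return name == "overlay2" })
	fmt.Println(driver) // overlay2
}
```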
+
+-------------------------------------------------------------------
+Mon Mar 20 08:12:01 UTC 2017 - jmassaguerpla@suse.com
+
+- Disable libseccomp for leap 42.1, sle12sp1 and sle12, because
+  docker needs a higher version. Otherwise, we get the error
+  "conditional filtering requires libseccomp version >= 2.2.1"
+  (bsc#1028639 and bsc#1028638)
+
+-------------------------------------------------------------------
+Fri Mar 17 11:08:03 UTC 2017 - asarai@suse.com
+
+- Add a backport of the fix for the AppArmor lazy-loading docker-exec case.
+  https://github.com/docker/docker/pull/31773
+  + pr31773-daemon-also-ensureDefaultApparmorProfile-in-exec-pat.patch
+
+-------------------------------------------------------------------
+Wed Mar 8 00:48:46 UTC 2017 - asarai@suse.com
+
+- Clean up docker-mount-secrets.patch to use the new swarm secrets internals of
+  Docker 1.13.0, which removes the need to implement any secret handling
+  ourselves. This resulted in the patch being split up.
+  - docker-mount-secrets.patch
+  + secrets-0001-daemon-allow-directory-creation-in-run-secrets.patch
+  + secrets-0002-SUSE-implement-SUSE-container-secrets.patch
+
+-------------------------------------------------------------------
+Mon Mar 6 15:31:02 UTC 2017 - jmassaguerpla@suse.com
+
+- Remove old plugins.json to prevent docker-1.13 from failing to start
+
+-------------------------------------------------------------------
+Mon Mar 6 12:52:14 UTC 2017 - jmassaguerpla@suse.com
+
+- Fix bsc#1026827: systemd TasksMax default throttles docker
+
+-------------------------------------------------------------------
+Mon Mar 6 10:09:14 UTC 2017 - jmassaguerpla@suse.com
+
+- Fix the post section by adding shadow as a package requirement.
+  Otherwise the groupadd instruction fails
+
+-------------------------------------------------------------------
+Sun Mar 5 04:54:52 UTC 2017 - asarai@suse.com
+
+- Add patch to fix TestDaemonCommand failure in %check. This is an upstream
+  bug, and has an upstream PR to fix it: https://github.com/docker/docker/pull/31549.
+  + pr31549-cmd-docker-fix-TestDaemonCommand.patch
+
+-------------------------------------------------------------------
+Wed Feb 1 15:59:40 UTC 2017 - jmassaguerpla@suse.com
+
+- update docker to 1.13.0
+
+  see details in https://github.com/docker/docker/releases/tag/v1.13.0
+
+- use the same buildflags for building docker and for building the
+  tests.
+
+- enable pkcs11:
+  https://github.com/docker/docker/commit/37fa75b3447007bb8ea311f02610bb383b0db77f
+
+-------------------------------------------------------------------
+Fri Jan 27 12:30:18 UTC 2017 - bg@suse.com
+
+- enable architecture s390x for openSUSE
+
+-------------------------------------------------------------------
+Thu Jan 26 15:43:38 UTC 2017 - jmassaguerpla@suse.com
+
+- provide the oci runtime so that containers that were using an old
+  runtime option get switched to the new runtime when started on the
+  new docker version. Fix bsc#1020806 bsc#1016992
+
+-------------------------------------------------------------------
+Fri Jan 13 13:56:15 UTC 2017 - jmassaguerpla@suse.com
+
+- fix CVE-2016-9962 bsc#1012568.
Fix it by updating to 1.12.6
+  plus an extra commit to fix live restore:
+  https://github.com/docker/docker/commit/97cd32a6a9076306baa637a29bba84c3f1f3d218
+
+-------------------------------------------------------------------
+Wed Jan 11 12:47:16 UTC 2017 - jmassaguerpla@suse.com
+
+- add a wait when starting the docker service, to fix
+  bsc#1019251
+
+-------------------------------------------------------------------
+Tue Dec 20 12:41:33 UTC 2016 - normand@linux.vnet.ibm.com
+
+- remove netlink_gcc_go.patch after integration of PR
+  https://github.com/golang/go/issues/11707
+- new boltdb_bolt_add_brokenUnaligned.patch for ppc64
+  waiting for https://github.com/boltdb/bolt/pull/635
+
+-------------------------------------------------------------------
+Tue Dec 20 05:08:54 UTC 2016 - asarai@suse.com
+
+- Remove old flags from dockerd's command line, to be more in line with
+  upstream (now that docker-runc is provided by the runc package). -H is
+  dropped because upstream dropped it due to concerns with socket
+  activation.
+- Remove socket activation entirely (a configuration sketch follows at the
+  end of this block).
+
+-------------------------------------------------------------------
+Mon Dec 19 12:41:13 UTC 2016 - jmassaguerpla@suse.com
+
+- update docker to 1.12.5 (bsc#1016307).
+  This fixes bsc#1015661
+
+-------------------------------------------------------------------
+Mon Dec 5 14:52:02 UTC 2016 - jmassaguerpla@suse.com
+
+- fix bash-completion
+
+-------------------------------------------------------------------
+Tue Nov 29 21:57:08 UTC 2016 - jimmy@boombatower.com
+
+- Add packageand(docker:bash) to bash-completion to match zsh-completion.
+
+-------------------------------------------------------------------
+Thu Nov 24 16:09:52 UTC 2016 - jmassaguerpla@suse.com
+
+- fix runc and containerd revisions.
+  Fix bsc#1009961
+
+-------------------------------------------------------------------
+Thu Oct 27 11:13:56 UTC 2016 - jmassaguerpla@suse.com
+
+- update docker to 1.12.3
+  - fix bsc#1007249 - CVE-2016-8867: Fix ambient capability usage in containers
+  - other fixes:
+    https://github.com/docker/docker/releases/tag/v1.12.3
+
+-------------------------------------------------------------------
+Thu Oct 13 11:15:17 UTC 2016 - jmassaguerpla@suse.com
+
+- update docker to 1.12.2 (bsc#1004490). See changelog
+
+https://github.com/docker/docker/blob/v1.12.2/CHANGELOG.md
+
+- update docker-mount-secrets.patch to 1.12.2 code
+
+-------------------------------------------------------------------
+Tue Oct 11 09:36:23 UTC 2016 - asarai@suse.com
+
+- docker-mount-secrets.patch: change the internal mountpoint name not to use
+  ":", as other tools can treat that character as special.
+  bsc#999582
+
+-------------------------------------------------------------------
+Mon Sep 19 11:56:15 UTC 2016 - jmassaguerpla@suse.com
+
+- fix the go_arches definition: use global instead of define; otherwise
+  it fails to build
+
+-------------------------------------------------------------------
+Wed Sep 14 09:41:57 UTC 2016 - asarai@suse.com
+
+- Add dockerd(8) man page.
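+
+  A sketch of what dropping -H and socket activation amounts to in practice
+  (the daemon.json content is an assumption for illustration; "hosts" is the
+  upstream daemon.json key, but this is not the configuration the package
+  ships):
+
+    # stop using systemd socket activation for the docker socket
+    systemctl stop docker.socket
+    systemctl disable docker.socket
+    # let dockerd own its socket instead of receiving -H on the command line
+    cat > /etc/docker/daemon.json <<'EOF'
+    {
+        "hosts": ["unix:///var/run/docker.sock"]
+    }
+    EOF
+    systemctl restart docker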
+
+-------------------------------------------------------------------
+Fri Sep 9 12:42:24 UTC 2016 - thipp@suse.de
+
+- add missing patch to changelog
+
+-------------------------------------------------------------------
+Wed Sep 7 16:33:59 UTC 2016 - thipp@suse.de
+
+- fix integration test case
+- add integration-cli-fix-TestInfoEnsureSucceeds.patch
+
+-------------------------------------------------------------------
+Tue Sep 6 13:28:38 UTC 2016 - thipp@suse.de
+
+- update rpmlintrc
+
+-------------------------------------------------------------------
+Fri Sep 2 12:02:23 UTC 2016 - thipp@suse.de
+
+- make test timeout configurable
+
+-------------------------------------------------------------------
+Fri Sep 2 10:25:27 UTC 2016 - asarai@suse.com
+
+- Remove noarch from docker-test, which was causing lots of fun issues when
+  trying to run the tests.
+
+-------------------------------------------------------------------
+Tue Aug 30 09:07:19 UTC 2016 - jmassaguerpla@suse.com
+
+- Fix build for ppc64le: use static libgo for dockerd and docker-proxy
+  as in the docker build.
+
+-------------------------------------------------------------------
+Mon Aug 29 12:11:21 UTC 2016 - jmassaguerpla@suse.com
+
+- Update docker to 1.12.1 (bsc#996015)
+
+  see changelog in https://github.com/docker/docker/releases/tag/v1.12.1
+
+-------------------------------------------------------------------
+Fri Aug 26 12:02:35 UTC 2016 - cbrauner@suse.de
+
+- Add asaurin@suse.com's test.sh test script.
+- Add integration test binary in docker.spec file. This is work done by
+  asaurin@suse.com.
+
+-------------------------------------------------------------------
+Fri Aug 26 10:43:38 UTC 2016 - asarai@suse.com
+
+- Package docker-proxy (which was split out of the docker binary in 1.12).
+  boo#995620
+
+-------------------------------------------------------------------
+Fri Aug 26 10:00:36 UTC 2016 - jmassaguerpla@suse.com
+
+- fix bsc#995102 - Docker "migrator" prevents installing "docker",
+  if docker 1.9 was installed before but there were no images
+
+-------------------------------------------------------------------
+Fri Aug 26 08:49:15 UTC 2016 - asarai@suse.com
+
+- Update docker.service file with several changes (a sketch follows at the
+  end of this block).
+  * Reapply fix for bsc#983015 (Limit*=infinity).
+  * Specify an "OCI" runtime for our runc package explicitly. bsc#978260
+
+-------------------------------------------------------------------
+Thu Aug 25 14:02:04 UTC 2016 - jmassaguerpla@suse.com
+
+- remove disable-pprof-trace.patch: We can remove this patch because
+  we use go 1.6, either gcc6-go or gc-go. This patch was for gcc5-go
+
+-------------------------------------------------------------------
+Wed Aug 24 12:31:23 UTC 2016 - jmassaguerpla@suse.com
+
+- add go_arches in the project configuration: this way, we can use the
+  same spec file but decide in the project configuration whether to
+  use gc-go or gcc-go for some archs.
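+
+  A sketch of the two docker.service changes described above, expressed as a
+  drop-in (file name, binary paths and the --default-runtime flag are
+  illustrative assumptions; the real change edits the shipped unit):
+
+    mkdir -p /etc/systemd/system/docker.service.d
+    cat > /etc/systemd/system/docker.service.d/suse.conf <<'EOF'
+    [Service]
+    # bsc#983015: lift resource limits so large container workloads don't hit them
+    LimitNOFILE=infinity
+    LimitNPROC=infinity
+    LimitCORE=infinity
+    # bsc#978260: register our runc build as an explicitly named OCI runtime;
+    # the empty ExecStart= first clears the ExecStart inherited from the unit
+    ExecStart=
+    ExecStart=/usr/bin/dockerd --add-runtime oci=/usr/sbin/docker-runc --default-runtime oci
+    EOF
+    systemctl daemon-reload && systemctl restart docker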
+
+-------------------------------------------------------------------
+Tue Aug 23 11:35:09 UTC 2016 - jmassaguerpla@suse.com
+
+- use gcc6-go instead of gcc5-go (bsc#988408)
+- build ppc64le with gc-go because this version builds with gc-go 1.6
+- remove bnc964673-boltdb-metadata-recovery.patch because it has already
+  been merged
+
+-------------------------------------------------------------------
+Tue Aug 23 11:34:09 UTC 2016 - cbrauner@suse.com
+
+- update to v1.12.0 (bsc#995058)
+  see detailed changelog at
+  https://github.com/docker/docker/releases/tag/v1.12.0
+- disable tests that fail in the obs build context
+- only run unit tests on architectures that provide the go list and go test
+  tools
+- disable dockerd, parser, integration test, and devicemapper-related tests
+  on versions below SLE12 and openSUSE_13.2
+- bump test timeout to 10m (for aarch64)
+- run unit tests during the build
+- Adapt docker.service file.
+- adapt install sections for gccgo builds: gccgo builds are not split into
+  separate folders for client and daemon; they both reside in dyngccgo.
+- gcc-go-patch: link against systemd when compiling the daemon.
+- Add disable-pprof-trace.patch
+  pprof.Trace() is not available in Go versions <= 1.4, which we use to build
+  SLES packages. This patch comments out the pprof.Trace() section.
+- update gcc-go-patch and docker-mount-secrets.patch
+
+-------------------------------------------------------------------
+Tue Aug 23 11:34:09 UTC 2016 - tboerger@suse.com
+
+- Fixed binary split, install both required binaries correctly
+
+-------------------------------------------------------------------
+Tue Aug 16 09:39:11 UTC 2016 - asarai@suse.com
+
+* Explicitly state the version dependencies for runC and containerd, to
+  avoid potential issues with incompatible component versions. These
+  must be updated *each time we do a release*. bsc#993847
+
+-------------------------------------------------------------------
+Mon Jul 25 05:34:50 UTC 2016 - sflees@suse.de
+
+- Don't exit mid-install; add the ability to not restart the docker
+  service during certain updates with long migration phases.
+  bsc#980555
+
+-------------------------------------------------------------------
+Tue Jul 19 17:03:32 UTC 2016 - jmassaguerpla@suse.com
+
+- remove kernel dependency (bsc#987198)
+
+-------------------------------------------------------------------
+Wed Jul 13 13:41:33 UTC 2016 - cbrauner@suse.de
+
+- remove sysconfig.docker.ppc64le patch;
+  setting the iptables option on ppc64le works now (bsc#988707)
+
+-------------------------------------------------------------------
+Tue Jul 5 17:52:58 UTC 2016 - jmassaguerpla@suse.com
+
+- fix bsc#984942: audit.rules in docker-1.9.1-58.1.x86_64.rpm has a
+  syntax error
+
+-------------------------------------------------------------------
+Tue Jul 5 14:26:45 UTC 2016 - asarai@suse.com
+
+* Update docker.service to include changes from upstream, including the
+  soon-to-be-merged patch https://github.com/docker/docker/pull/24307,
+  which fixes bnc#983015.
+
+-------------------------------------------------------------------
+Fri Jun 24 00:23:57 UTC 2016 - dmueller@suse.com
+
+- readd dropped declaration for patch200
+
+-------------------------------------------------------------------
+Wed Jun 8 14:42:08 UTC 2016 - asarai@suse.de
+
+* Removed patches:
+  - cve-2016-3697-numeric-uid.patch (merged upstream in gh@docker/docker#22998).
+* Update Docker to 1.11.2.
(bsc#989566) Changelog from upstream:
+
+* Networking
+  * Fix a stale endpoint issue on overlay networks during ungraceful restart
+    (#23015)
+  * Fix an issue where the wrong port could be reported by docker
+    inspect/ps/port (#22997)
+
+* Runtime
+  * Fix a potential panic when running docker build (#23032)
+  * Fix interpretation of --user parameter (#22998)
+  * Fix a bug preventing container statistics from being correctly reported (#22955)
+  * Fix an issue preventing containers from being restarted after a daemon
+    restart (#22947)
+  * Fix issues when running 32 bit binaries on Ubuntu 16.04 (#22922)
+  * Fix a possible deadlock on image deletion and container attach (#22918)
+  * Fix an issue where containers fail to start after a daemon restart if they
+    depend on a containerized cluster store (#22561)
+  * Fix an issue causing docker ps to hang on CentOS when using devicemapper
+    (#22168, #23067)
+  * Fix a bug preventing docker exec into a container when using
+    devicemapper (#22168, #23067)
+
+-------------------------------------------------------------------
+Fri May 20 10:26:39 UTC 2016 - jmassaguerpla@suse.com
+
+- Fix udev files ownership
+
+-------------------------------------------------------------------
+Thu May 19 13:43:44 UTC 2016 - tchvatal@suse.com
+
+- Pass over with spec-cleaner, no factual changes
+
+-------------------------------------------------------------------
+Wed May 18 14:21:09 UTC 2016 - asarai@suse.de
+
+* Make sure we *always* build unstripped Go binaries.
+
+-------------------------------------------------------------------
+Mon May 16 13:55:07 UTC 2016 - asarai@suse.de
+
+* Add a patch to fix database soft corruption issues if the Docker daemon dies
+  in a bad state. There is a PR upstream to vendor Docker to have this fix as
+  well, but it probably won't get in until 1.11.2. bnc#964673
+  (https://github.com/docker/docker/pull/22765)
+
+  + bnc964673-boltdb-metadata-recovery.patch
+
+-------------------------------------------------------------------
+Mon May 2 07:40:22 UTC 2016 - asarai@suse.de
+
+* Remove conditional Patch directive for SUSE secrets, since conditionally
+  including patches results in incompatible .src.rpms. The patch is still
+  applied conditionally.
+
+-------------------------------------------------------------------
+Fri Apr 29 09:04:54 UTC 2016 - asarai@suse.de
+
+* Update to Docker 1.11.1.
Changelog from upstream:
+
+  * Distribution
+  - Fix schema2 manifest media type to be of type `application/vnd.docker.container.image.v1+json` ([#21949](https://github.com/docker/docker/pull/21949))
+
+  * Documentation
+  + Add missing API documentation for changes introduced with 1.11.0 ([#22048](https://github.com/docker/docker/pull/22048))
+
+  * Builder
+  * Append labels passed to `docker build` as arguments as an implicit `LABEL` command at the end of the processed `Dockerfile` ([#22184](https://github.com/docker/docker/pull/22184))
+
+  * Networking
+  - Fix a panic that would occur when forwarding DNS queries ([#22261](https://github.com/docker/docker/pull/22261))
+  - Fix an issue where OS threads could end up within an incorrect network namespace when using user defined networks ([#22261](https://github.com/docker/docker/pull/22261))
+
+  * Runtime
+  - Fix a bug preventing labels configuration from being reloaded via the config file ([#22299](https://github.com/docker/docker/pull/22299))
+  - Fix a regression where containers mounting `/var/run` would prevent other containers from being removed ([#22256](https://github.com/docker/docker/pull/22256))
+  - Fix an issue where it would be impossible to update both `memory-swap` and `memory` values together ([#22255](https://github.com/docker/docker/pull/22255))
+  - Fix a regression from 1.11.0 where the `/auth` endpoint would not initialize `serveraddress` if it is not provided ([#22254](https://github.com/docker/docker/pull/22254))
+  - Add missing cleanup of container temporary files when cancelling a scheduled restart ([#22237](https://github.com/docker/docker/pull/22237))
+  - Removed scary error message when no restart policy is specified ([#21993](https://github.com/docker/docker/pull/21993))
+  - Fix a panic that would occur when the plugins were activated via the json spec ([#22191](https://github.com/docker/docker/pull/22191))
+  - Fix restart backoff logic to correctly reset delay if container ran for at least 10secs ([#22125](https://github.com/docker/docker/pull/22125))
+  - Remove error message when a container restart gets cancelled ([#22123](https://github.com/docker/docker/pull/22123))
+  - Fix an issue where `docker` would not correctly clean up after `docker exec` ([#22121](https://github.com/docker/docker/pull/22121))
+  - Fix a panic that could occur when servicing concurrent `docker stats` commands ([#22120](https://github.com/docker/docker/pull/22120))
+  - Revert deprecation of non-existing host directories auto-creation ([#22065](https://github.com/docker/docker/pull/22065))
+  - Hide misleading rpc error on daemon shutdown ([#22058](https://github.com/docker/docker/pull/22058))
+
+-------------------------------------------------------------------
+Wed Apr 27 10:29:47 UTC 2016 - jmassaguerpla@suse.com
+
+- Fix go version to 1.5 (bsc#977394)
+
+-------------------------------------------------------------------
+Fri Apr 22 10:43:37 UTC 2016 - asarai@suse.de
+
+- Add patch to fix vulnerability in Docker <= 1.11.0. This patch is upstream,
+  but was merged after the 1.11.0 merge window. CVE-2016-3697. bsc#976777.
+  + cve-2016-3697-numeric-uid.patch
+  The upstream PR is here[1] and was vendored into Docker here[2].
+
+  [1]: https://github.com/opencontainers/runc/pull/708
+  [2]: https://github.com/docker/docker/pull/21665
+
+-------------------------------------------------------------------
+Mon Apr 18 19:33:56 UTC 2016 - mpluskal@suse.com
+
+- Supplement zsh from zsh-completion
+  * zsh-completion will be automatically installed if zsh and
+    docker are installed
+
+-------------------------------------------------------------------
+Mon Apr 18 15:44:11 UTC 2016 - jmassaguerpla@suse.com
+
+- Remove gcc5_socket_workaround.patch: This patch is not needed anymore
+  since gcc5 has been updated on all platforms
+
+-------------------------------------------------------------------
+Mon Apr 18 06:19:18 UTC 2016 - asarai@suse.de
+
+* Removed patches that have been fixed upstream and in gcc-go:
+  - boltdb_bolt_powerpc.patch
+  - fix-apparmor.patch
+  - fix-btrfs-ioctl-structure.patch
+  - fix-docker-init.patch
+  - libnetwork_drivers_bridge_powerpc.patch
+  - ignore-dockerinit-checksum.patch
+* Require containerd, as it is the only currently supported Docker execdriver.
+* Update docker.socket to require containerd.socket and use --containerd in
+  docker.service so that the services are self-contained.
+* Update to Docker 1.11.0. Changelog from upstream:
+
+  * Builder
+  - Fix a bug where Docker would not use the correct uid/gid when processing the `WORKDIR` command ([#21033](https://github.com/docker/docker/pull/21033))
+  - Fix a bug where copy operations with userns would not use the proper uid/gid ([#20782](https://github.com/docker/docker/pull/20782), [#21162](https://github.com/docker/docker/pull/21162))
+
+  * Client
+  * Usage of the `:` separator for security option has been deprecated. `=` should be used instead ([#21232](https://github.com/docker/docker/pull/21232))
+  + The client user agent is now passed to the registry on `pull`, `build`, `push`, `login` and `search` operations ([#21306](https://github.com/docker/docker/pull/21306), [#21373](https://github.com/docker/docker/pull/21373))
+  * Allow setting the Domainname and Hostname separately through the API ([#20200](https://github.com/docker/docker/pull/20200))
+  * Docker info will now warn users if it cannot detect the kernel version or the operating system ([#21128](https://github.com/docker/docker/pull/21128))
+  - Fix an issue where `docker stats --no-stream` output could be all 0s ([#20803](https://github.com/docker/docker/pull/20803))
+  - Fix a bug where some newly started containers would not appear in a running `docker stats` command ([#20792](https://github.com/docker/docker/pull/20792))
+  * Post processing is no longer enabled for linux-cgo terminals ([#20587](https://github.com/docker/docker/pull/20587))
+  - Values to `--hostname` are now refused if they do not comply with [RFC1123](https://tools.ietf.org/html/rfc1123) ([#20566](https://github.com/docker/docker/pull/20566))
+  + Docker learned how to use a SOCKS proxy ([#20366](https://github.com/docker/docker/pull/20366), [#18373](https://github.com/docker/docker/pull/18373))
+  + Docker now supports external credential stores ([#20107](https://github.com/docker/docker/pull/20107))
+  * `docker ps` now supports displaying the list of volumes mounted inside a container ([#20017](https://github.com/docker/docker/pull/20017))
+  * `docker info` now also reports Docker's root directory location ([#19986](https://github.com/docker/docker/pull/19986))
+  - Docker now prohibits logging in with an empty username (spaces are trimmed) ([#19806](https://github.com/docker/docker/pull/19806))
+  * Docker events
attributes are now sorted by key ([#19761](https://github.com/docker/docker/pull/19761))
+  * `docker ps` no longer shows exported ports for stopped containers ([#19483](https://github.com/docker/docker/pull/19483))
+  - Docker now cleans up after itself if a save/export command fails ([#17849](https://github.com/docker/docker/pull/17849))
+  * Docker load learned how to display a progress bar ([#17329](https://github.com/docker/docker/pull/17329), [#20078](https://github.com/docker/docker/pull/20078))
+
+  * Distribution
+  - Fix a panic that occurred when pulling an image with 0 layers ([#21222](https://github.com/docker/docker/pull/21222))
+  - Fix a panic that could occur on error while pushing to a registry with a misconfigured token service ([#21212](https://github.com/docker/docker/pull/21212))
+  + All first-level delegation roles are now signed when doing a trusted push ([#21046](https://github.com/docker/docker/pull/21046))
+  + OAuth support for registries was added ([#20970](https://github.com/docker/docker/pull/20970))
+  * `docker login` now handles tokens using the implementation found in [docker/distribution](https://github.com/docker/distribution) ([#20832](https://github.com/docker/docker/pull/20832))
+  * `docker login` will no longer prompt for an email ([#20565](https://github.com/docker/docker/pull/20565))
+  * Docker will now fall back to registry V1 if no basic auth credentials are available ([#20241](https://github.com/docker/docker/pull/20241))
+  * Docker will now try to resume layer download where it left off after a network error/timeout ([#19840](https://github.com/docker/docker/pull/19840))
+  - Fix generated manifest mediaType when pushing cross-repository ([#19509](https://github.com/docker/docker/pull/19509))
+  - Fix docker requesting additional push credentials when pulling an image if Content Trust is enabled ([#20382](https://github.com/docker/docker/pull/20382))
+
+  * Logging
+  - Fix a race in the journald log driver ([#21311](https://github.com/docker/docker/pull/21311))
+  * Docker syslog driver now uses the RFC-5424 format when emitting logs ([#20121](https://github.com/docker/docker/pull/20121))
+  * Docker GELF log driver now allows specifying the compression algorithm and level via the `gelf-compression-type` and `gelf-compression-level` options ([#19831](https://github.com/docker/docker/pull/19831))
+  * Docker daemon learned to output uncolorized logs via the `--raw-logs` option ([#19794](https://github.com/docker/docker/pull/19794))
+  + Docker, on Windows platform, now includes an ETW (Event Tracing in Windows) logging driver named `etwlogs` ([#19689](https://github.com/docker/docker/pull/19689))
+  * Journald log driver learned how to handle tags ([#19564](https://github.com/docker/docker/pull/19564))
+  + The fluentd log driver learned the following options: `fluentd-address`, `fluentd-buffer-limit`, `fluentd-retry-wait`, `fluentd-max-retries` and `fluentd-async-connect` ([#19439](https://github.com/docker/docker/pull/19439))
+  + Docker learned to send logs to Google Cloud via the new `gcplogs` logging driver.
([#18766](https://github.com/docker/docker/pull/18766))
+
+  * Misc
+  + When saving linked images together with `docker save` a subsequent `docker load` will correctly restore their parent/child relationship ([#21385](https://github.com/docker/docker/pull/21385))
+  + Support for building the Docker cli for OpenBSD was added ([#21325](https://github.com/docker/docker/pull/21325))
+  + Labels can now be applied at network, volume and image creation ([#21270](https://github.com/docker/docker/pull/21270))
+  * The `dockremap` user is now created as a system user ([#21266](https://github.com/docker/docker/pull/21266))
+  - Fix a few response body leaks ([#21258](https://github.com/docker/docker/pull/21258))
+  - Docker, when run as a service with systemd, will now properly manage its processes' cgroups ([#20633](https://github.com/docker/docker/pull/20633))
+  * Docker info now reports the value of cgroup KernelMemory or emits a warning if it is not supported ([#20863](https://github.com/docker/docker/pull/20863))
+  * Docker info now also reports the cgroup driver in use ([#20388](https://github.com/docker/docker/pull/20388))
+  * Docker completion is now available on PowerShell ([#19894](https://github.com/docker/docker/pull/19894))
+  * `dockerinit` is no more ([#19490](https://github.com/docker/docker/pull/19490),[#19851](https://github.com/docker/docker/pull/19851))
+  + Support for building Docker on arm64 was added ([#19013](https://github.com/docker/docker/pull/19013))
+  + Experimental support for building docker.exe in a native Windows Docker installation ([#18348](https://github.com/docker/docker/pull/18348))
+
+  * Networking
+  - Fix panic if a node is forcibly removed from the cluster ([#21671](https://github.com/docker/docker/pull/21671))
+  - Fix "error creating vxlan interface" when starting a container in a Swarm cluster ([#21671](https://github.com/docker/docker/pull/21671))
+  * `docker network inspect` will now report all endpoints whether they have an active container or not ([#21160](https://github.com/docker/docker/pull/21160))
+  + Experimental support for the MacVlan and IPVlan network drivers has been added ([#21122](https://github.com/docker/docker/pull/21122))
+  * Output of `docker network ls` is now sorted by network name ([#20383](https://github.com/docker/docker/pull/20383))
+  - Fix a bug where Docker would allow a network to be created with the reserved `default` name ([#19431](https://github.com/docker/docker/pull/19431))
+  * `docker network inspect` returns whether a network is internal or not ([#19357](https://github.com/docker/docker/pull/19357))
+  + Control IPv6 via explicit option when creating a network (`docker network create --ipv6`).
This shows up as a new `EnableIPv6` field in `docker network inspect` ([#17513](https://github.com/docker/docker/pull/17513))
+  * Support for AAAA Records (aka IPv6 Service Discovery) in embedded DNS Server ([#21396](https://github.com/docker/docker/pull/21396))
+  - Fix to not forward docker domain IPv6 queries to external servers ([#21396](https://github.com/docker/docker/pull/21396))
+  * Multiple A/AAAA records from embedded DNS Server for DNS Round robin ([#21019](https://github.com/docker/docker/pull/21019))
+  - Fix endpoint count inconsistency after an ungraceful daemon restart ([#21261](https://github.com/docker/docker/pull/21261))
+  - Move the ownership of exposed ports and port-mapping options from Endpoint to Sandbox ([#21019](https://github.com/docker/docker/pull/21019))
+  - Fixed a bug which prevents docker reload when host is configured with ipv6.disable=1 ([#21019](https://github.com/docker/docker/pull/21019))
+  - Added inbuilt nil IPAM driver ([#21019](https://github.com/docker/docker/pull/21019))
+  - Fixed bug in iptables.Exists() logic [#21019](https://github.com/docker/docker/pull/21019)
+  - Fixed a Veth interface leak when using overlay network ([#21019](https://github.com/docker/docker/pull/21019))
+  - Fixed a bug which prevents docker reload after a network delete during shutdown ([#20214](https://github.com/docker/docker/pull/20214))
+  - Make sure iptables chains are recreated on firewalld reload ([#20419](https://github.com/docker/docker/pull/20419))
+  - Allow to pass global datastore during config reload ([#20419](https://github.com/docker/docker/pull/20419))
+  - For anonymous containers use the alias name for IP to name mapping, i.e. DNS PTR record ([#21019](https://github.com/docker/docker/pull/21019))
+  - Fix a panic when deleting an entry from /etc/hosts file ([#21019](https://github.com/docker/docker/pull/21019))
+  - Source the forwarded DNS queries from the container net namespace ([#21019](https://github.com/docker/docker/pull/21019))
+  - Fix to retain the network internal mode config for bridge networks on daemon reload ([#21780](https://github.com/docker/docker/pull/21780))
+  - Fix to retain IPAM driver option configs on daemon reload ([#21914](https://github.com/docker/docker/pull/21914))
+
+  * Plugins
+  - Fix a file descriptor leak that would occur every time plugins were enumerated ([#20686](https://github.com/docker/docker/pull/20686))
+  - Fix an issue where Authz plugin would corrupt the payload body when faced with a large amount of data ([#20602](https://github.com/docker/docker/pull/20602))
+
+  * Runtime
+  - Fix a panic that could occur when cleaning up after a container started with invalid parameters ([#21716](https://github.com/docker/docker/pull/21716))
+  - Fix a race with event timers stopping early ([#21692](https://github.com/docker/docker/pull/21692))
+  - Fix race conditions in the layer store, potentially corrupting the map and crashing the process ([#21677](https://github.com/docker/docker/pull/21677))
+  - Un-deprecate auto-creation of host directories for mounts. This feature was marked deprecated in
+    Docker 1.9, but this was decided to be too much of a backward-incompatible change, so it was decided to keep the feature ([#21666](https://github.com/docker/docker/pull/21666)).
+
+  + It is now possible for containers to share the NET and IPC namespaces when `userns` is enabled ([#21383](https://github.com/docker/docker/pull/21383))
+  + `docker inspect <image>` will now expose the rootfs layers ([#21370](https://github.com/docker/docker/pull/21370))
+  + Docker Windows gained a minimal `top` implementation ([#21354](https://github.com/docker/docker/pull/21354))
+  * Docker learned to report the faulty exe when a container cannot be started due to its condition ([#21345](https://github.com/docker/docker/pull/21345))
+  * Docker with device mapper will now refuse to run if `udev sync` is not available ([#21097](https://github.com/docker/docker/pull/21097))
+  - Fix a bug where Docker would not validate the config file upon configuration reload ([#21089](https://github.com/docker/docker/pull/21089))
+  - Fix a hang that would happen on attach if the initial start was to fail ([#21048](https://github.com/docker/docker/pull/21048))
+  - Fix an issue where registry service options in the daemon configuration file were not properly taken into account ([#21045](https://github.com/docker/docker/pull/21045))
+  - Fix a race between the exec and resize operations ([#21022](https://github.com/docker/docker/pull/21022))
+  - Fix an issue where nanoseconds were not correctly taken into account when filtering Docker events ([#21013](https://github.com/docker/docker/pull/21013))
+  - Fix the handling of the Docker command when passed a 64-byte id ([#21002](https://github.com/docker/docker/pull/21002))
+  * Docker will now return a `204` (i.e. http.StatusNoContent) code when it successfully deleted a network ([#20977](https://github.com/docker/docker/pull/20977))
+  - Fix a bug where the daemon would wait indefinitely in case the process it was about to kill had already exited on its own ([#20967](https://github.com/docker/docker/pull/20967))
+  * The devmapper driver learned the `dm.min_free_space` option. If the mapped device free space reaches the passed value, new device creation will be prohibited. ([#20786](https://github.com/docker/docker/pull/20786))
+  + Docker can now prevent processes in containers from gaining new privileges via the `--security-opt=no-new-privileges` flag ([#20727](https://github.com/docker/docker/pull/20727))
+  - Starting a container with the `--device` option will now correctly resolve symlinks ([#20684](https://github.com/docker/docker/pull/20684))
+  + Docker now relies on [`containerd`](https://github.com/docker/containerd) and [`runc`](https://github.com/opencontainers/runc) to spawn containers.
([#20662](https://github.com/docker/docker/pull/20662))
+  - Fix docker configuration reloading to only alter values present in the given config file ([#20604](https://github.com/docker/docker/pull/20604))
+  + Docker now allows setting a container hostname via the `--hostname` flag when `--net=host` ([#20177](https://github.com/docker/docker/pull/20177))
+  + Docker now allows executing privileged container while running with `--userns-remap` if both `--privileged` and the new `--userns=host` flag are specified ([#20111](https://github.com/docker/docker/pull/20111))
+  - Fix Docker not correctly cleaning up old containers upon restarting after a crash ([#19679](https://github.com/docker/docker/pull/19679))
+  * Docker will now error out if it doesn't recognize a configuration key within the config file ([#19517](https://github.com/docker/docker/pull/19517))
+  - Fix container loading, on daemon startup, when they depend on a plugin running within a container ([#19500](https://github.com/docker/docker/pull/19500))
+  * `docker update` learned how to change a container restart policy ([#19116](https://github.com/docker/docker/pull/19116))
+  * `docker inspect` now also returns a new `State` field containing the container state in a human readable way (i.e. one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`)([#18966](https://github.com/docker/docker/pull/18966))
+  + Docker learned to limit the number of active pids (i.e. processes) within the container via the `pids-limit` flag. NOTE: This requires `CGROUP_PIDS=y` to be in the kernel configuration. ([#18697](https://github.com/docker/docker/pull/18697))
+  - `docker load` now has a `--quiet` option to suppress the load output ([#20078](https://github.com/docker/docker/pull/20078))
+  - Fix a bug in neighbor discovery for IPv6 peers ([#20842](https://github.com/docker/docker/pull/20842))
+  - Fix a panic during cleanup if a container was started with invalid options ([#21802](https://github.com/docker/docker/pull/21802))
+  - Fix a situation where a container cannot be stopped if the terminal is closed ([#21840](https://github.com/docker/docker/pull/21840))
+
+  * Security
+  * Objects with the `pcp_pmcd_t` selinux type were given management access to `/var/lib/docker(/.*)?` ([#21370](https://github.com/docker/docker/pull/21370))
+  * `restart_syscall`, `copy_file_range`, `mlock2` joined the list of allowed calls in the default seccomp profile ([#21117](https://github.com/docker/docker/pull/21117), [#21262](https://github.com/docker/docker/pull/21262))
+  * `send`, `recv` and `x32` were added to the list of allowed syscalls and arch in the default seccomp profile ([#19432](https://github.com/docker/docker/pull/19432))
+  * Docker Content Trust now requests the server to perform snapshot signing ([#21046](https://github.com/docker/docker/pull/21046))
+  * Support for using YubiKeys for Content Trust signing has been moved out of experimental ([#21591](https://github.com/docker/docker/pull/21591))
+
+  * Volumes
+  * Output of `docker volume ls` is now sorted by volume name ([#20389](https://github.com/docker/docker/pull/20389))
+  * Local volumes can now accept options similar to the unix `mount` tool ([#20262](https://github.com/docker/docker/pull/20262))
+  - Fix an issue where a one-letter directory name could not be used as the source for volumes ([#21106](https://github.com/docker/docker/pull/21106))
+  + `docker run -v` now accepts a new flag `nocopy`.
This tells the runtime not to copy the container path content into the volume (which is the default behavior) ([#21223](https://github.com/docker/docker/pull/21223))
+
+-------------------------------------------------------------------
+Wed Apr 13 11:16:51 UTC 2016 - jmassaguerpla@suse.com
+
+- docker.spec: apply gcc5 socket patch also for sle12 and leap
+  because gcc5 has been updated there as well.
+
+- docker.spec: add an "is_opensuse" check for the mount-secrets patch.
+  This way we can use this same package for opensuse.
+
+-------------------------------------------------------------------
+Fri Apr 8 13:27:55 UTC 2016 - dmueller@suse.com
+
+- use go-lang for aarch64:
+  - drop fix_platform_type_arm.patch (works around a gcc-go bug, so
+    unnecessary)
+
+-------------------------------------------------------------------
+Thu Apr 7 09:35:40 UTC 2016 - asarai@suse.de
+
+- Add patch from upstream (https://github.com/docker/docker/pull/21723) to fix
+  compilation on Factory and Tumbleweed (which have btrfsprogs >= 4.5).
+  + fix-btrfs-ioctl-structure.patch bnc#974208
+
+-------------------------------------------------------------------
+Tue Mar 22 15:27:26 UTC 2016 - fcastelli@suse.com
+
+- Changed systemd unit file and default sysconfig file to include network
+  options; this is needed to get SDNs like flannel to work
+
+-------------------------------------------------------------------
+Tue Mar 15 09:16:55 UTC 2016 - asarai@suse.de
+
+- docker.spec: update warning to mention that /etc/sysconfig/docker is sourced
+  by the migration script.
+
+-------------------------------------------------------------------
+Mon Mar 14 10:20:19 UTC 2016 - asarai@suse.de
+
+- docker.spec: only Recommends: the docker-image-migrator package as it is no
+  longer required for our ugly systemctl hacks.
+- docker.spec: fix up documentation to refer to the script you need to run in
+  the migrator package.
+- docker.spec: print a warning if you force the DOCKER_FORCE_INSTALL option.
+
+-------------------------------------------------------------------
+Fri Mar 11 08:44:46 UTC 2016 - asarai@suse.de
+
+- spec: switch to new done file name from docker-image-migrator
+
+-------------------------------------------------------------------
+Fri Mar 11 08:41:49 UTC 2016 - jmassaguerpla@suse.com
+
+- update to docker 1.10.3 (bnc#970637)
+  Runtime
+    Fix Docker client exiting with an "Unrecognized input header" error #20706
+    Fix Docker exiting if Exec is started with both AttachStdin and Detach #20647
+  Distribution
+    Fix a crash when pushing multiple images sharing the same layers to the same repository in parallel #20831
+    Fix a panic when pushing images to a registry which uses a misconfigured token service #21030
+  Plugin system
+    Fix issue preventing volume plugins from starting when SELinux is enabled #20834
+    Prevent Docker from exiting if a volume plugin returns a null response for Get requests #20682
+    Fix plugin system leaking file descriptors if a plugin has an error #20680
+  Security
+    Fix linux32 emulation failing during docker build #20672. It was due to the
+    personality syscall being blocked by the default seccomp profile.
+    Fix Oracle XE 10g failing to start in a container #20981. It was due to the
+    ipc syscall being blocked by the default seccomp profile.
+    Fix user namespaces not working on Linux From Scratch #20685
+    Fix issue preventing the daemon from starting if userns is enabled and
+    the subuid or subgid files contain comments #20725
+
+  More at https://github.com/docker/docker/releases/tag/v1.10.3
+
+-------------------------------------------------------------------
+Thu Mar 10 13:52:54 UTC 2016 - asarai@suse.de
+
+- spec: improve file-based migration checks to make sure that they don't cause
+  errors if running on a /var/lib/docker without /var/lib/docker/graph.
+
+-------------------------------------------------------------------
+Wed Mar 9 13:45:14 UTC 2016 - asarai@suse.de
+
+- spec: implement file-based migration checks. The migrator will be updated to
+  match the warning message's instructions. This works in my testing.
+
+-------------------------------------------------------------------
+Mon Mar 7 14:09:17 UTC 2016 - normand@linux.vnet.ibm.com
+
+- more patches to build on ppc64 architecture
+  update netlink_gcc_go.patch
+  new netlink_netns_powerpc.patch
+  new boltdb_bolt_powerpc.patch
+  new libnetwork_drivers_bridge_powerpc.patch to replace
+  deleted fix-ppc64le.patch
+
+-------------------------------------------------------------------
+Tue Mar 1 17:54:41 UTC 2016 - jmassaguerpla@suse.com
+
+- fix bsc#968972 - let docker manage the cgroups of the processes
+  that it launches without systemd (a sketch follows at the end of
+  this block)
+
+-------------------------------------------------------------------
+Tue Mar 1 15:28:56 UTC 2016 - jmassaguerpla@suse.com
+
+- Require docker-image-migrator (bnc#968933)
+
+-------------------------------------------------------------------
+Tue Feb 23 08:55:17 UTC 2016 - jmassaguerpla@suse.com
+
+Update to version 1.10.2 (bnc#968933)
+
+  - Runtime
+      Prevent systemd from deleting containers' cgroups when its configuration is reloaded #20518
+      Fix SELinux issues by disregarding --read-only when mounting /dev/mqueue #20333
+      Fix chown permissions used during docker cp when userns is used #20446
+      Fix configuration loading issue with all booleans defaulting to true #20471
+      Fix occasional panic with docker logs -f #20522
+
+  - Distribution
+      Keep layer reference if deletion failed to avoid a badly inconsistent state #20513
+      Handle gracefully a corner case when canceling migration #20372
+      Fix docker import on compressed data #20367
+      Fix tar-split file corruption during migration that later causes docker push and docker save to fail #20458
+
+  - Networking
+      Fix daemon crash if embedded DNS is sent garbage #20510
+
+  - Volumes
+      Fix issue with multiple volume references with same name #20381
+
+  - Security
+      Fix potential cache corruption and delegation conflict issues #20523
+
+link to changelog:
+
+https://github.com/docker/docker/blob/v1.10.2/CHANGELOG.md
+
+-------------------------------------------------------------------
+Mon Feb 15 09:48:41 UTC 2016 - asarai@suse.com
+
+- fix-apparmor.patch: switch to a backported version of docker/docker#20305,
+  which also fixes several potential issues if the major version of apparmor
+  changes.
+
+-------------------------------------------------------------------
+Mon Feb 15 08:35:43 UTC 2016 - asarai@suse.com
+
+- Remove 1.10.0 tarball.
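+
+  A sketch of what "manage the cgroups without systemd" means in daemon terms
+  (the exec-opt is the upstream mechanism; the bare invocation is illustrative,
+  as the packaged daemon is started via its systemd unit):
+
+    # 1.10-era invocation: use the cgroupfs driver instead of delegating
+    # cgroup management of container processes to systemd
+    docker daemon --exec-opt native.cgroupdriver=cgroupfs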
+
+-------------------------------------------------------------------
+Fri Feb 12 16:04:19 UTC 2016 - jmassaguerpla@suse.com
+
+- Update to docker 1.10.1
+  It includes some fixes to 1.10.0, see detailed changelog in
+
+https://github.com/docker/docker/blob/v1.10.1/CHANGELOG.md
+
+-------------------------------------------------------------------
+Tue Feb 9 17:24:46 UTC 2016 - jmassaguerpla@suse.com
+
+- Update docker to 1.10.0 (bnc#965918)
+
+  Add user namespace support
+  Add support for custom seccomp profiles
+  Improvements in network and volume management
+
+detailed changelog in
+
+https://github.com/docker/docker/blob/590d5108bbdaabb05af590f76c9757daceb6d02e/CHANGELOG.md
+
+- removed patches, because the code has been merged in the 1.10.0 release:
+  libcontainer-apparmor-fixes.patch: see: https://github.com/docker/docker/blob/release/v1.10/contrib/apparmor/template.go
+  fix_bnc_958255.patch: see https://github.com/docker/docker/commit/2b4f64e59018c21aacbf311d5c774dd5521b5352
+  use_fs_cgroups_by_default.patch
+  fix_cgroup.parent_path_sanitisation.patch
+  add_bolt_ppc64.patch
+  add_bolt_arm64.patch
+  add_bolt_s390x.patch
+
+- remove gcc-go-build-static-libgo.patch: This has been replaced by gcc-go-patches.patch
+
+- removed patches, because arm and ppc are not built using the dynbinary target, but the dyngccgo one:
+  docker_remove_journald_to_fix_dynbinary_build_on_arm.patch
+  docker_remove_journald_to_fix_dynbinary_build_on_powerpc.patch
+  docker_remove_journald_to_fix_dynbinary_build_on_arm64.patch
+
+- added patches:
+  fix_platform_type_arm.patch: fix build for arm64 and aarch64: set utsname as uint8 for arm64 and aarch64
+  gcc5_socket_workaround.patch: gcc5-go in Tumbleweed includes this commit
+  https://github.com/golang/gofrontend/commit/a850225433a66a58613c22185c3b09626f5545eb
+  which "fixes" the data type for RawSockaddr.Data.
+  However, docker now expects the "wrong" data type, since docker had a workaround
+  for that issue.
+  Thus, we need to work around the workaround in Tumbleweed.
+  netlink_gcc_go.patch: add constants for syscalls TUNSETIFF and TUNSETPERSIST to fix a gcc issue.
+  This is a workaround for bnc#964468: gcc-go can no longer compile Docker.
+  fix-apparmor.patch: fix https://github.com/docker/docker/issues/20269. It affects SLE12, which has apparmor
+  version 2.8, and not openSUSE, which has version 2.9.
+  fix-ppc64le.patch: Build the netlink driver using int8 and not uint8 for the data structure
+
+
+- reviewed patches:
+  ignore-dockerinit-checksum.patch: review context in patch
+  fix-docker-init.patch: review patch because the build method has been changed in the spec file for gcc-go
+  gcc-go-patches.patch: review context in patch
+
+- Build requires go >= 1.5: For version 1.9, we could use Go 1.4.3,
+  see GO_VERSION https://github.com/docker/docker/blob/release/v1.9/Dockerfile
+  However, for version 1.10, we need go 1.5.3,
+  see GO_VERSION https://github.com/docker/docker/blob/release/v1.10/Dockerfile
+
+- fix bnc#965600 - SLES12 SP1 - Static shared memory limit in container
+
+
+-------------------------------------------------------------------
+Tue Feb 9 13:24:34 UTC 2016 - asarai@suse.com
+
+- docker-mount-secrets.patch: fix up this patch to work on Docker 1.10
+
+-------------------------------------------------------------------
+Wed Jan 27 11:57:59 UTC 2016 - asarai@suse.com
+
+- docker-mount-secrets.patch: properly register /run/secrets as a
+  mountpoint, so that it is unmounted properly when the container
+  is removed and thus container removal works.
(bnc#963142)
+- docker-mount-secrets.patch: in addition, add some extra debugging
+  information to the secrets patch.
+
+-------------------------------------------------------------------
+Wed Jan 27 09:42:59 UTC 2016 - asarai@suse.com
+
+- fix_json_econnreset_bug.patch: fix a JSON bug that causes containers not to
+  start in certain circumstances. https://github.com/docker/docker/issues/14203
+-------------------------------------------------------------------
+Wed Dec 23 11:10:54 UTC 2015 - fcastelli@suse.com jmassaguerpla@suse.com
+
+- fix_bnc_958255.patch: fix "Docker creates strange apparmor profile"
+  (bnc#958255)
+- use_fs_cgroups_by_default.patch: Use fs cgroups by default:
+  https://github.com/docker/docker/commit/419fd7449fe1a984f582731fcd4d9455000846b0
+- fix_cgroup.parent_path_sanitisation.patch: fix cgroup.Parent path
+  sanitisation:
+  https://github.com/opencontainers/runc/commit/bf899fef451956be4abd63de6d6141d9f9096a02
+- Add rules for auditd. This is required to fix bnc#959405
+- Remove 7 patches, add 6 and modify 1, after the 1.9.1 upgrade
+  * Removed:
+    - docker_missing_ppc64le_netlink_linux_files.patch: the code that this
+      bug refers to has been removed upstream
+    - docker_rename_jump_amd64_as_jump_linux.patch: the code that this bug
+      refers to has been removed upstream
+    - Remove fix_15279.patch: code has been merged upstream
+    - Remove add_missing_syscall_for_s390x.patch: code has been merged upstream
+    - Remove fix_incompatible_assignment_error_bnc_950931.patch: code has been
+      merged upstream
+    - Remove fix_libsecomp_error_bnc_950931.patch: the code that this bug refers to
+      has been removed upstream
+    - Remove gcc5_socket_workaround.patch: Code has been fixed. Building with
+      this patch is giving the error we were trying to fix, implying that the
+      code has been fixed somewhere else.
+  * Added:
+    - add_bolt_ppc64.patch
+    - add_bolt_arm64.patch
+    - docker_remove_journald_to_fix_dynbinary_build_on_arm.patch
+    - docker_remove_journald_to_fix_dynbinary_build_on_powerpc.patch
+    - docker_remove_journald_to_fix_dynbinary_build_on_arm64.patch
+    - gcc-go-build-static-libgo.patch: enable static linking of libgo in gcc-go
+      In order to do this, we had to work around an issue from gcc-go:
+      https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69357
+  * Modify:
+- Upgrade to 1.9.1 (bnc#956434)
+  * Runtime:
+    - Do not prevent daemon from booting if images could not be restored
+      (#17695)
+    - Force IPC mount to unmount on daemon shutdown/init (#17539)
+    - Turn IPC unmount errors into warnings (#17554)
+    - Fix `docker stats` performance regression (#17638)
+    - Clarify cryptic error message upon `docker logs` if `--log-driver=none`
+      (#17767)
+    - Fix seldom panics (#17639, #17634, #17703)
+    - Fix opq whiteouts problems for files with dot prefix (#17819)
+    - devicemapper: try defaulting to xfs instead of ext4 for performance
+      reasons (#17903, #17918)
+    - devicemapper: fix displayed fs in docker info (#17974)
+    - selinux: only relabel if user requested so with the `z` option
+      (#17450, #17834)
+    - Do not make network calls when normalizing names (#18014)
+  * Client:
+    - Fix `docker login` on windows (#17738)
+    - Fix bug with `docker inspect` output when not connected to daemon
+      (#17715)
+    - Fix `docker inspect -f {{.HostConfig.Dns}} somecontainer` (#17680)
+  * Builder:
+    - Fix regression with symlink behavior in ADD/COPY (#17710)
+  * Networking:
+    - Allow passing a network ID as an argument for `--net` (#17558)
+    - Fix connect to host and prevent disconnect from host for `host` network
+      (#17476)
+    - Fix `--fixed-cidr` issue when gateway ip falls in ip-range and ip-range
+      is not the first block in the network (#17853)
+    - Restore deterministic `IPv6` generation from `MAC` address on default
+      `bridge` network (#17890)
+    - Allow port-mapping only for endpoints created on docker run (#17858)
+    - Fixed an endpoint delete issue with a possible stale sbox (#18102)
+  * Distribution:
+    - Correct parent chain in v2 push when v1Compatibility files on the disk
+      are inconsistent (#18047)
+- Update to version 1.9.0 (bnc#954812):
+  * Runtime:
+    - `docker stats` now returns block IO metrics (#15005)
+    - `docker stats` now details network stats per interface (#15786)
+    - Add `ancestor=<image>` filter to `docker ps --filter` flag to filter
+      containers based on their ancestor images (#14570)
+    - Add `label=<key>` filter to `docker ps --filter` to filter
+      containers based on label (#16530)
+    - Add `--kernel-memory` flag to `docker run` (#14006)
+    - Add `--message` flag to `docker import` allowing to specify an optional
+      message (#15711)
+    - Add `--privileged` flag to `docker exec` (#14113)
+    - Add `--stop-signal` flag to `docker run` allowing to replace the
+      container process stopping signal (#15307)
+    - Add a new `unless-stopped` restart policy (#15348)
+    - Inspecting an image now returns tags (#13185)
+    - Add container size information to `docker inspect` (#15796)
+    - Add `RepoTags` and `RepoDigests` field to `/images/{name:.*}/json`
+      (#17275)
+    - Remove the deprecated `/container/ps` endpoint from the API (#15972)
+    - Send and document correct HTTP codes for `/exec/<id>/start` (#16250)
+    - Share shm and mqueue between containers sharing IPC namespace (#15862)
+    - Event stream now shows OOM status when `--oom-kill-disable` is
+      set (#16235)
+    - Ensure special network files (/etc/hosts etc.)
are read-only if
+      bind-mounted
+      with `ro` option (#14965)
+    - Improve `rmi` performance (#16890)
+    - Do not update /etc/hosts for the default bridge network, except for links
+      (#17325)
+    - Fix conflict with duplicate container names (#17389)
+    - Fix an issue with incorrect template execution in `docker inspect`
+      (#17284)
+    - DEPRECATE `-c` short flag variant for `--cpu-shares` in docker run
+      (#16271)
+  * Client:
+    - Allow `docker import` to import from local files (#11907)
+  * Builder:
+    - Add a `STOPSIGNAL` Dockerfile instruction allowing to set a different
+      stop-signal for the container process (#15307)
+    - Add an `ARG` Dockerfile instruction and a `--build-arg` flag to
+      `docker build`
+      that allows adding build-time environment variables (#15182)
+    - Improve cache miss performance (#16890)
+  * Storage:
+    - devicemapper: Implement deferred deletion capability (#16381)
+  * Networking:
+    - `docker network` exits experimental and is part of standard release
+      (#16645)
+    - New network top-level concept, with associated subcommands and API
+      (#16645)
+      WARNING: the API is different from the experimental API
+    - Support for multiple isolated/micro-segmented networks (#16645)
+    - Built-in multihost networking using VXLAN based overlay driver (#14071)
+    - Support for third-party network plugins (#13424)
+    - Ability to dynamically connect containers to multiple networks (#16645)
+    - Support for user-defined IP address management via pluggable IPAM drivers
+      (#16910)
+    - Add daemon flags `--cluster-store` and `--cluster-advertise` for built-in
+      nodes discovery (#16229)
+    - Add `--cluster-store-opt` for setting up TLS settings (#16644)
+    - Add `--dns-opt` to the daemon (#16031)
+    - DEPRECATE the following container `NetworkSettings` fields in API v1.21:
+      `EndpointID`, `Gateway`, `GlobalIPv6Address`, `GlobalIPv6PrefixLen`,
+      `IPAddress`, `IPPrefixLen`, `IPv6Gateway` and `MacAddress`.
+      Those are now specific to the `bridge` network. Use
+      `NetworkSettings.Networks` to inspect
+      the networking settings of a container per network.
+  * Volumes:
+    - New top-level `volume` subcommand and API (#14242)
+    - Move API volume driver settings to host-specific config (#15798)
+    - Print an error message if volume name is not unique (#16009)
+    - Ensure volumes created from Dockerfiles always use the local volume driver
+      (#15507)
+    - DEPRECATE auto-creating missing host paths for bind mounts (#16349)
+  * Logging:
+    - Add `awslogs` logging driver for Amazon CloudWatch (#15495)
+    - Add generic `tag` log option to allow customizing container/image
+      information passed to driver (e.g. show container names) (#15384)
+    - Implement the `docker logs` endpoint for the journald driver (#13707)
+    - DEPRECATE driver-specific log tags (e.g. `syslog-tag`, etc.)
(#15384)
+  * Distribution:
+    - `docker search` now works with partial names (#16509)
+    - Push optimization: avoid buffering to file (#15493)
+    - The daemon will display progress for images that were already being
+      pulled by another client (#15489)
+    - Only permissions required for the current action being performed are
+      requested (#)
+    - Renaming trust keys (and respective environment variables) from `offline`
+      to `root` and `tagging` to `repository` (#16894)
+    - DEPRECATE trust key environment variables
+      `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and
+      `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` (#16894)
+  * Security:
+    - Add SELinux profiles to the rpm package (#15832)
+    - Fix various issues with AppArmor profiles provided in the deb package
+      (#14609)
+    - Add AppArmor policy that prevents writing to /proc (#15571)
+- Change systemd unit file to no longer use the deprecated "-d" option
+  (bnc#954737); a sketch of this change follows at the end of this block
+
+-------------------------------------------------------------------
+Tue Nov 24 16:34:52 UTC 2015 - fcastelli@suse.com
+
+- Changed docker-mount-secrets.patch: allow removal of containers
+  even when the entry point failed. bnc#954797
+
+-------------------------------------------------------------------
+Tue Nov 3 12:36:25 UTC 2015 - msabate@suse.com
+
+- Fixed the format of the fix_libsecomp_error_bnc_950931 patch.
+
+-------------------------------------------------------------------
+Tue Nov 3 12:30:22 UTC 2015 - msabate@suse.com
+
+- Merged the fix_libsecomp_error_bnc_950931.patch and the
+  fix_x86_build_removing_empty_file_jump_amd_64.patch patches.
+
+-------------------------------------------------------------------
+Tue Nov 3 10:39:27 UTC 2015 - jmassaguerpla@suse.com
+
+- Fix build for x86_64. Patch fix_libsecomp_error_bnc_950931.patch
+  had created an empty file jump_amd64.go instead of removing it.
+  This broke the build for x86_64.
+  This commit fixes it by removing that empty file.
+
+  fix_x86_build_removing_empty_file_jump_amd_64.patch: patch that
+  removes the empty file jump_amd64.go
+
+-------------------------------------------------------------------
+Mon Nov 2 15:49:48 UTC 2015 - msabate@suse.com
+
+- Added a patch that fixes a known gcc-go bug on ppc64le in the
+  syscall.RawSockaddr type.
+
+  gcc5_socket_workaround.patch
+
+-------------------------------------------------------------------
+Thu Oct 29 14:17:32 UTC 2015 - jmassaguerpla@suse.com
+
+- Add patches for fixing the ppc64le build (bnc#950931)
+
+  fix_libsecomp_error_bnc_950931.patch
+  fix_incompatible_assignment_error_bnc_950931.patch
+  docker_missing_ppc64le_netlink_linux_files.patch
+
+- Remove docker_rename_jump_amd64_as_jump_linux.patch because it clashes
+  with the previous patches.
+
+-------------------------------------------------------------------
+Thu Oct 22 12:11:14 UTC 2015 - jmassaguerpla@suse.com
+
+- Exclude libgo as a requirement. The auto-requires script was adding
+  libgo as a requirement when building with gcc-go, which was wrong.
+
+-------------------------------------------------------------------
+Fri Oct 16 15:43:46 UTC 2015 - jmassaguerpla@suse.com
+
+- Add patch for a missing system call for s390x. See
+
+  https://github.com/docker/docker/commit/eecf6cd48cf7c48f00aa8261cf431c87084161ae
+
+  add_missing_syscall_for_s390x.patch: contains the patch
+
+- Exclude s390x for sle12 because it hangs when running go. It works on
+  sle12sp1, so we only want to exclude sle12, not sle12sp1.
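+
+  For reference, the kind of ExecStart change the "-d" entry above describes,
+  sketched as a drop-in (the actual fix edits the shipped unit directly; the
+  file name is illustrative):
+
+    mkdir -p /etc/systemd/system/docker.service.d
+    cat > /etc/systemd/system/docker.service.d/daemon-cmd.conf <<'EOF'
+    [Service]
+    # clear the ExecStart that used the deprecated short option,
+    # e.g. ExecStart=/usr/bin/docker -d
+    ExecStart=
+    # the 1.9-era replacement is the explicit subcommand
+    ExecStart=/usr/bin/docker daemon
+    EOF
+    systemctl daemon-reload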
+ +------------------------------------------------------------------- +Mon Oct 12 20:10:00 UTC 2015 - fcastelli@suse.com + +- Update docker to 1.8.3 version: + * Fix layer IDs leading to local graph poisoning (CVE-2014-8178) (bnc#949660) + * Fix manifest validation and parsing logic errors allowing pull-by-digest validation bypass (CVE-2014-8179) + * Add `--disable-legacy-registry` to prevent a daemon from using a v1 registry + +------------------------------------------------------------------- +Tue Sep 22 13:20:49 UTC 2015 - jmassaguerpla@suse.com + +- Update docker to 1.8.2 version + + see detailed changelog in + + https://github.com/docker/docker/releases/tag/v1.8.2 + + fix bsc#946653: update to docker 1.8.2 + +- devicemapper: fix zero-sized field access + Fix issue #15279: does not build with Go 1.5 tip + Due to golang/go@7904946 + the devices field is dropped. + + This solution works on go1.4 and go1.5 + + See more in https://github.com/docker/docker/pull/15404 + + This fix was not included in v1.8.2. See the previous link + for why. + + fix_15279.patch: contains the patch for issue #15279 + +------------------------------------------------------------------- +Fri Aug 21 08:46:30 UTC 2015 - normand@linux.vnet.ibm.com + +- new patch as per upstream issue + https://github.com/docker/docker/issues/14056#issuecomment-113680944 + docker_rename_jump_amd64_as_jump_linux.patch + +------------------------------------------------------------------- +Fri Aug 21 08:07:58 UTC 2015 - normand@linux.vnet.ibm.com + +- ignore-dockerinit-checksum.patch needs -p1 in spec + +------------------------------------------------------------------- +Thu Aug 13 09:38:03 UTC 2015 - jmassaguerpla@suse.com + +- Update to docker 1.8.1 (bsc#942369 and bsc#942370): + - Fix a bug where pushing multiple tags would result in invalid images + +- Update to docker 1.8.0: + see detailed changelog in + + https://github.com/docker/docker/releases/tag/v1.8.0 + +- remove docker-netns-aarch64.patch: This patch was adding + vendor/src/github.com/vishvananda/netns/netns_linux_arm64.go + which is now included upstream, so we don't need this patch anymore + + +------------------------------------------------------------------- +Fri Jul 24 14:41:21 UTC 2015 - jmassaguerpla@suse.com + +- Remove 0002-Stripped-dockerinit-binary.patch because we do not + use it anymore (we got rid of that when updating to 1.7.1) + +------------------------------------------------------------------- +Fri Jul 24 14:14:38 UTC 2015 - jmassaguerpla@suse.com + +- Exclude archs where docker does not build. Otherwise it gets into + an infinite loop when building. + + We'll fix that later if we want to release for those archs.
+ +------------------------------------------------------------------- +Wed Jul 15 08:11:11 UTC 2015 - jmassaguerpla@suse.com + +- Update to 1.7.1 (2015-07-14) (bnc#938156) + * Runtime + - Fix default user spawning exec process with docker exec + - Make --bridge=none not configure the network bridge + - Publish networking stats properly + - Fix implicit devicemapper selection with static binaries + - Fix socket connections that hung intermittently + - Fix bridge interface creation on CentOS/RHEL 6.6 + - Fix local dns lookups added to resolv.conf + - Fix copy command mounting volumes + - Fix read/write privileges in volumes mounted with --volumes-from + * Remote API + - Fix unmarshalling of Command and Entrypoint + - Set limit for minimum client version supported + - Validate port specification + - Return proper errors when attach/reattach fail + * Distribution + - Fix pulling private images + - Fix fallback between registry V2 and V1 + +------------------------------------------------------------------- +Fri Jul 10 11:22:00 UTC 2015 - jmassaguerpla@suse.com + +- Exclude init scripts other than systemd from the test-package + +------------------------------------------------------------------- +Wed Jul 1 12:38:50 UTC 2015 - jmassaguerpla@suse.com + +- Exclude the Intel 32-bit arch. Docker does not build on it. Let's + make it explicit. + +------------------------------------------------------------------- +Thu Jun 25 16:49:59 UTC 2015 - dmueller@suse.com + +- rediff ignore-dockerinit-checksum.patch, gcc-go-build-static-libgo.patch + to make them apply again. +- introduce go_arches for architectures that use the go compiler + instead of gcc-go +- add docker-netns-aarch64.patch: Add support for AArch64 +- enable build for aarch64 + +------------------------------------------------------------------- +Wed Jun 24 09:02:03 UTC 2015 - fcastelli@suse.com + +- Build man pages only on platforms where the gc compiler is available.
+ +------------------------------------------------------------------- +Mon Jun 22 08:48:11 UTC 2015 - fcastelli@suse.com + +- Updated to 1.7.0 (2015-06-16) - bnc#935570 + * Runtime + - Experimental feature: support for out-of-process volume plugins + - The userland proxy can be disabled in favor of hairpin NAT using the daemon’s `--userland-proxy=false` flag + - The `exec` command supports the `-u|--user` flag to specify the new process owner + - Default gateway for containers can be specified daemon-wide using the `--default-gateway` and `--default-gateway-v6` flags + - The CPU CFS (Completely Fair Scheduler) quota can be set in `docker run` using `--cpu-quota` + - Container block IO can be controlled in `docker run` using `--blkio-weight` + - ZFS support + - The `docker logs` command supports a `--since` argument + - UTS namespace can be shared with the host with `docker run --uts=host` + * Quality + - Networking stack was entirely rewritten as part of the libnetwork effort + - Engine internals refactoring + - Volumes code was entirely rewritten to support the plugins effort + - Sending SIGUSR1 to a daemon will dump all goroutine stacks without exiting + * Build + - Support ${variable:-value} and ${variable:+value} syntax for environment variables + - Support resource management flags `--cgroup-parent`, `--cpu-period`, `--cpu-quota`, `--cpuset-cpus`, `--cpuset-mems` + - git context changes with branches and directories + - The .dockerignore file supports exclusion rules + * Distribution + - Client support for v2 mirroring for the official registry + * Bugfixes + - Firewalld is now supported and will automatically be used when available + - mounting --device recursively +- Patch 0002-Stripped-dockerinit-binary.patch renamed to fix-docker-init.patch + and fixed to build with the latest version of docker + +------------------------------------------------------------------- +Tue Jun 9 16:35:46 UTC 2015 - jmassaguerpla@suse.com + +- Add test subpackage and fix line numbers in patches + +------------------------------------------------------------------- +Fri Jun 5 15:29:45 UTC 2015 - fcastelli@suse.com + +- Fixed ppc64le name inside of spec file + +------------------------------------------------------------------- +Fri Jun 5 15:23:47 UTC 2015 - fcastelli@suse.com + +- Build docker on PPC and S390x using gcc-go provided by gcc5 + * added sysconfig.docker.ppc64le: make docker daemon start on ppc64le + despite some iptables issues. To be removed soon + * ignore-dockerinit-checksum.patch: applied only when building with + gcc-go. Required to work around a limitation of gcc-go + * gcc-go-build-static-libgo.patch: used only when building with gcc-go, + link libgo statically into docker itself.
+ +------------------------------------------------------------------- +Mon Jun 1 15:47:59 UTC 2015 - fcastelli@suse.com + +- Remove set-SCC_URL-env-variable.patch; the SCC_URL is now read + from SUSEConnect by the container service + +------------------------------------------------------------------- +Mon Jun 1 13:03:24 UTC 2015 - fcastelli@suse.com + +- Automatically set the SCC_URL environment variable inside of the + containers by parsing the /etc/SUSEConnect.example file + * Add set-SCC_URL-env-variable.patch + +------------------------------------------------------------------- +Mon Jun 1 10:00:55 UTC 2015 - fcastelli@suse.com + +- Place SCC machine credentials inside of /run/secrets/credentials.d + * Edit docker-mount-scc-credentials.patch + +------------------------------------------------------------------- +Thu May 28 15:10:09 UTC 2015 - dmacvicar@suse.de + +- pass the SCC machine credentials to the container + * Add docker-mount-scc-credentials.patch + +------------------------------------------------------------------- +Wed May 27 10:02:51 UTC 2015 - dmacvicar@suse.de + +- build and install man pages + +------------------------------------------------------------------- +Mon May 18 15:08:59 UTC 2015 - fcastelli@suse.com + +- Update to version 1.6.2 (2015-05-13) [bnc#931301] + * Revert change prohibiting mounting into /sys + +------------------------------------------------------------------- +Fri May 8 15:00:38 UTC 2015 - fcastelli@suse.com + +- Updated to version 1.6.1 (2015-05-07) [bnc#930235] + * Security + - Fix read/write /proc paths (CVE-2015-3630) + - Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631) + - Fix opening of file-descriptor 1 (CVE-2015-3627) + - Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629) + - Prohibit mount of /sys + * Runtime + - Update Apparmor policy to not allow mounts +- Updated libcontainer-apparmor-fixes.patch: adapt patch to reflect + changes introduced by docker 1.6.1 + +------------------------------------------------------------------- +Thu May 7 13:33:03 UTC 2015 - develop7@develop7.info + +- Get rid of SocketUser and SocketGroup workarounds for docker.socket + +------------------------------------------------------------------- +Fri Apr 17 14:02:13 UTC 2015 - fcastelli@suse.com + +- Updated to version 1.6.0 (2015-04-07) [bnc#908033] + * Builder: + + Building images from an image ID + + build containers with resource constraints, i.e. `docker build --cpu-shares=100 --memory=1024m...` + + `commit --change` to apply specified Dockerfile instructions while committing the image + + `import --change` to apply specified Dockerfile instructions while importing the image + + basic build cancellation + * Client: + + Windows Support + * Runtime: + + Container and image Labels + + `--cgroup-parent` for specifying a parent cgroup to place container cgroup within + + Logging drivers, `json-file`, `syslog`, or `none` + + Pulling images by ID + + `--ulimit` to set the ulimit on a container + + `--default-ulimit` option on the daemon which applies to all created containers (and overwritten by `--ulimit` on run) +- Updated '0002-Stripped-dockerinit-binary.patch' to reflect changes inside of + the latest version of Docker. +- bnc#908033: support of Docker Registry API v2.
+ +------------------------------------------------------------------- +Fri Apr 3 19:57:38 UTC 2015 - dmueller@suse.com + +- enable build for armv7l + +------------------------------------------------------------------- +Fri Apr 3 14:59:35 UTC 2015 - fcastelli@suse.com + +- Updated docker.spec to fix building with the latest version of our + Go package. +- Updated 0002-Stripped-dockerinit-binary.patch to fix check made by + the docker daemon against the dockerinit binary. + +------------------------------------------------------------------- +Fri Mar 27 10:29:35 UTC 2015 - fcastelli@suse.com + +- Updated systemd service and socket units to fix socket activation + and to align with best practices recommended by upstream. Moreover + socket activation fixes bnc#920645. + +------------------------------------------------------------------- +Wed Feb 11 13:59:01 UTC 2015 - fcastelli@suse.com + + - Updated to 1.5.0 (2015-02-10): + * Builder: + - Dockerfile to use for a given `docker build` can be specified with + the `-f` flag + - Dockerfile and .dockerignore files can be themselves excluded as part + of the .dockerignore file, thus preventing modifications to these files + from invalidating the ADD or COPY instructions cache + - ADD and COPY instructions accept relative paths + - Dockerfile `FROM scratch` instruction is now interpreted as a no-base + specifier + - Improve performance when exposing a large number of ports + * Hack: + - Allow client-side only integration tests for Windows + - Include docker-py integration tests against Docker daemon as part of our + test suites + * Packaging: + - Support for the new version of the registry HTTP API + - Speed up `docker push` for images with a majority of already existing + layers + - Fixed contacting a private registry through a proxy + * Remote API: + - A new endpoint will stream live container resource metrics and can be + accessed with the `docker stats` command + - Containers can be renamed using the new `rename` endpoint and the + associated `docker rename` command + - Container `inspect` endpoint shows the ID of `exec` commands running in + this container + - Container `inspect` endpoint shows the number of times Docker + auto-restarted the container + - New types of event can be streamed by the `events` endpoint: ‘OOM’ + (container died with out of memory), ‘exec_create’, and ‘exec_start’ + - Fixed returned string fields which hold numeric characters incorrectly + omitting surrounding double quotes + * Runtime: + - Docker daemon has full IPv6 support + - The `docker run` command can take the `--pid=host` flag to use the host + PID namespace, which makes it possible for example to debug host processes + using containerized debugging tools + - The `docker run` command can take the `--read-only` flag to make the + container’s root filesystem mounted as readonly, which can be used in + combination with volumes to force a container’s processes to only write to + locations that will be persisted + - Container total memory usage can be limited for `docker run` using the + `--memory-swap` flag + - Major stability improvements for devicemapper storage driver + - Better integration with host system: containers will reflect changes + to the host's `/etc/resolv.conf` file when restarted + - Better integration with host system: per-container iptable rules are moved + to the DOCKER chain + - Fixed container exiting on out of memory to return an invalid exit code + * Other: + - The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are + properly taken into
account by the client when connecting to the + Docker daemon + +------------------------------------------------------------------- +Thu Jan 15 10:00:07 UTC 2015 - fcastelli@suse.com + +- Updated to 1.4.1 (2014-12-15): + * Runtime: + - Fix issue with volumes-from and bind mounts not being honored after + create (fixes bnc#913213) + +------------------------------------------------------------------- +Thu Jan 15 09:41:20 UTC 2015 - fcastelli@suse.com + +- Added e2fsprogs as a runtime dependency; this is required when the + devicemapper driver is used. (bnc#913211). +- Fixed owner & group for docker.socket (thanks to Andrei Dziahel and + https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=752555#5) + +------------------------------------------------------------------- +Fri Dec 12 16:13:30 UTC 2014 - fcastelli@suse.com + +- Updated to 1.4.0 (2014-12-11): + * Notable Features since 1.3.0: + - Set key=value labels to the daemon (displayed in `docker info`), applied with + the new `-label` daemon flag + - Add support for `ENV` in Dockerfile of the form: + `ENV name=value name2=value2...` + - New Overlayfs Storage Driver + - `docker info` now returns an `ID` and `Name` field + - Filter events by event name, container, or image + - `docker cp` now supports copying from container volumes + - Fixed `docker tag`, so it honors `--force` when overriding a tag for an existing + image. +- Changes introduced by 1.3.3 (2014-12-11): + * Security: + - Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356) - (bnc#909709) + - Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357) - (bnc#909710) + - Validate image IDs (CVE-2014-9358) - (bnc#909712) + * Runtime: + - Fix an issue when image archives are being read slowly + * Client: + - Fix a regression related to stdin redirection + - Fix a regression with `docker cp` when destination is the current directory + +------------------------------------------------------------------- +Wed Nov 26 11:27:06 UTC 2014 - fcastelli@suse.com + +- Updated to 1.3.2 (2014-11-20) - fixes bnc#907012 (CVE-2014-6407) and + bnc#907014 (CVE-2014-6408) + * Security: + - Fix tar breakout vulnerability + - Extractions are now sandboxed in a chroot + - Security options are no longer committed to images + * Runtime: + - Fix deadlock in `docker ps -f exited=1` + - Fix a bug when `--volumes-from` references a container that failed to start + * Registry: + - `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16 + - Private registries whose IPs fall in the 127.0.0.0/8 range do not need + the `--insecure-registry` flag + - Skip the experimental registry v2 API when mirroring is enabled +- Fixed minor packaging issues.
+ +------------------------------------------------------------------- +Fri Oct 31 08:54:47 UTC 2014 - fcastelli@suse.com + +- Updated to version 1.3.1 (2014-10-28) + * Security: + - Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and + registry [CVE-2014-5277] + - Secure HTTPS connection to registries with certificate verification and + without HTTP fallback unless `--insecure-registry` is specified + * Runtime: + - Fix issue where volumes would not be shared + * Client: + - Fix issue with `--iptables=false` not automatically + setting `--ip-masq=false` + - Fix docker run output to non-TTY stdout + * Builder: + - Fix escaping `$` for environment variables + - Fix issue with lowercase `onbuild` Dockerfile instruction + - Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, + `WORKDIR`, `EXPOSE`, `VOLUME` and `USER` + +------------------------------------------------------------------- +Mon Oct 20 08:38:30 UTC 2014 - fcastelli@suse.com + + - Upgraded to version 1.3.0 (2014-10-14) + * docker `exec` allows you to run additional processes inside existing containers + * docker `create` gives you the ability to create a container via the cli without executing a process + * `--security-opts` options to allow user to customize container labels and apparmor profiles + * docker `ps` filters + * wildcard support to copy/add + * move production urls to get.docker.com from get.docker.io + * allocate ip address on the bridge inside a valid cidr + * use drone.io for pr and ci testing + * ability to setup an official registry mirror + * Ability to save multiple images with docker `save` + +------------------------------------------------------------------- +Fri Sep 12 13:21:40 UTC 2014 - cbosdonnat@suse.com + +- Generated AppArmor profile used mount rules which aren't supported + in our version of AppArmor. libcontainer-apparmor-fixes.patch + +------------------------------------------------------------------- +Thu Sep 4 15:41:39 UTC 2014 - fcastelli@suse.com + +- Updates to SUSE's readme file. + +------------------------------------------------------------------- +Mon Aug 25 07:49:48 UTC 2014 - fcastelli@suse.com + +- Upgraded to version 1.2.0: + * Runtime: + - Make /etc/hosts, /etc/resolv.conf and /etc/hostname editable at runtime + - Auto-restart containers using policies + - Use /var/lib/docker/tmp for large temporary files + - `--cap-add` and `--cap-drop` to tweak which Linux capabilities you want + - `--device` to use devices in containers + * Client: + - `docker search` on private registries + - Add `exited` filter to `docker ps --filter` + - `docker rm -f` now kills instead of stopping + - Support for IPv6 addresses in `--dns` flag + * Proxy: + - Proxy instances in separate processes + - Small bug fix on UDP proxy + +------------------------------------------------------------------- +Fri Aug 8 15:31:41 UTC 2014 - fcastelli@suse.com + +- Final changes to README_SUSE.md + +------------------------------------------------------------------- +Fri Aug 8 10:28:48 UTC 2014 - fcastelli@suse.com + +- Added other small fixes to README_SUSE.md + +------------------------------------------------------------------- +Thu Aug 7 14:06:30 UTC 2014 - fcastelli@suse.com + +- Small improvements to README_SUSE.md + +------------------------------------------------------------------- +Thu Aug 7 13:29:30 UTC 2014 - fcastelli@suse.com + +- Removed useless sysctl rule.
+- Added README_SUSE.md + +------------------------------------------------------------------- +Fri Jul 25 06:17:04 UTC 2014 - fcastelli@suse.com + +- Updated to 1.1.2: + * Runtime: + - Fix port allocation for existing containers + - Fix containers restart on daemon restart +- Updated conflict condition with LXC package. + +------------------------------------------------------------------- +Fri Jul 18 09:38:47 UTC 2014 - fcastelli@suse.com + +- Add apparmor-parser runtime dependency + +------------------------------------------------------------------- +Fri Jul 18 08:44:29 UTC 2014 - fcastelli@suse.com + +- Build with AppArmor and SELinux support + +------------------------------------------------------------------- +Wed Jul 16 13:37:23 UTC 2014 - fcastelli@suse.com + +- Ensure docker can be built only on x86_64 + +------------------------------------------------------------------- +Wed Jul 16 09:07:45 UTC 2014 - fcastelli@suse.com + +- Added docker-rpmlintrc to list of source files + +------------------------------------------------------------------- +Mon Jul 14 09:39:40 UTC 2014 - fcastelli@suse.com + +- Updated to 1.1.1, notable features since 1.0.0: + * Add `.dockerignore` support + * Pause containers during `docker commit` + * Add `--tail` to `docker logs` + * Enhance security for the LXC driver +- Builder + * Fix issue with ADD + * Allow a tar file as context for `docker build` + * Fix issue with white-spaces and multi-lines in `Dockerfiles` + * Fix `ONBUILD` instruction passed to grandchildren +- Runtime + * Overall performance improvements + * Allow `/` as source of `docker run -v` + * Fix port allocation + * Fix bug in `docker save` + * Add links information to `docker inspect` + * Fix events subscription + * Fix /etc/hostname file with host networking + * Allow `-h` and `--net=none` + * Fix issue with hotplug devices in `--privileged` +- Client + * Improve command line parsing for `docker commit` + * Fix artifacts with events + * Fix a panic with empty flags +- Remote API + * Improve status code for the `start` and `stop` endpoints +- Miscellaneous + * Fix several races + +------------------------------------------------------------------- +Mon Jul 14 09:03:23 UTC 2014 - fcastelli@suse.com + +- Fix CVE-2014-3499: systemd socket activation results in privilege escalation [bnc#885209] + +------------------------------------------------------------------- +Tue Jun 10 15:58:24 UTC 2014 - fcastelli@suse.com + +- add exclusivearch to restrict the build to architectures with a working "go" package + (patch submitted by Rudy).
+ +------------------------------------------------------------------- +Mon Jun 9 21:09:28 UTC 2014 - fcastelli@suse.com + +- Updated to 1.0.0, Notable features since 0.12.0 + * Production support + +------------------------------------------------------------------- +Mon Jun 9 14:58:12 UTC 2014 - fcastelli@suse.com + +- Upgraded to 0.12.0: + * New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file + * Inherit file permissions from the host on `ADD` + * New `pause` and `unpause` commands to allow pausing and unpausing of containers using cgroup freezer + * The `images` command has a `-f`/`--filter` option to filter the list of images + * Add `--force-rm` to clean up after a failed build + * Standardize JSON keys in Remote API to CamelCase + * Pull from a docker run now assumes `latest` tag if not specified + * Enhance security on Linux capabilities and device nodes + +------------------------------------------------------------------- +Wed May 21 15:24:11 UTC 2014 - fcastelli@suse.com + +- Do not require ca-certificates-cert package at runtime; it's not needed. + +------------------------------------------------------------------- +Wed May 21 14:22:47 UTC 2014 - fcastelli@suse.com + +- Disabled AUFS backend at build time; we are never going to support that. +- Updated rpmlint to ignore missing man page of docker. + +------------------------------------------------------------------- +Wed May 21 08:10:48 UTC 2014 - smoioli@suse.com + +- Fixes a merge issue with TTYs: https://github.com/dotcloud/docker/pull/4882 + +------------------------------------------------------------------- +Thu May 15 15:04:51 UTC 2014 - fcastelli@suse.com + +- Ensure /etc/sysconfig/docker file is created upon package installation. + +------------------------------------------------------------------- +Thu May 15 14:35:39 UTC 2014 - fcastelli@suse.com + +- Updated rpmlintrc + +------------------------------------------------------------------- +Thu May 15 13:45:03 UTC 2014 - fcastelli@suse.com + +- Do not specify a custom DOCKERINIT_PATH at build time. + +------------------------------------------------------------------- +Thu May 15 13:21:44 UTC 2014 - fcastelli@suse.com + +- Removed 0001-Allowed-installation-of-dockerinit-into-usr-lib64.patch, leaving + dockerinit installed inside of /usr/lib/docker. + +------------------------------------------------------------------- +Thu May 15 13:05:20 UTC 2014 - fcastelli@suse.com + +- Added sysconfig file to handle docker environment file. + +------------------------------------------------------------------- +Thu May 8 08:09:17 UTC 2014 - fcastelli@suse.com + + - Update to 0.11.1: + * Registry: + - Fix push and pull to private registry + - 0.11.0 changes: + * SELinux support for mount and process labels + * Linked containers can be accessed by hostname + * Use the new `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces + * Add a ping endpoint to the Remote API to do healthchecks of your docker daemon + * Logs can now be returned with an optional timestamp + * Docker now works with registries that support SHA-512 + * Multiple registry endpoints are supported to allow registry mirrors + +------------------------------------------------------------------- +Wed Apr 9 07:28:35 UTC 2014 - fcastelli@suse.com + +- Updated to version 0.10.0: + * Builder: + - Fix printing multiple messages on a single line.
Fixes broken output during builds. + - Follow symlinks inside container's root for ADD build instructions. + - Fix EXPOSE caching. + * Contrib: + - Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. + - Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. + - Remove inotifywait hack from the upstart host-integration example because it's not necessary any more. + - Add check-config script to contrib. + - Fix fish shell completion. + * Remote API: + - Add TLS auth support for API. + - Move git clone from daemon to client. + - Fix content-type detection in docker cp. + - Split API into 2 go packages. + * Runtime: + - Support hairpin NAT without going through Docker server. + - devicemapper: succeed immediately when removing non-existing devices. + - devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping). + - devicemapper: increase timeout in waitClose to 10 seconds. + - devicemapper: ensure we shut down thin pool cleanly. + - devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice. + - devicemapper: avoid AB-BA deadlock. + - devicemapper: make shutdown better/faster. + - improve alpha sorting in mflag. + - Remove manual http cookie management because the cookiejar is being used. + - Use BSD raw mode on Darwin. Fixes nano, tmux and others. + - Add FreeBSD support for the client. + - Merge auth package into registry. + - Add deprecation warning for -t on `docker pull`. + - Remove goroutine leak on error. + - Update parseLxcInfo to comply with new lxc1.0 format. + - Fix attach exit on darwin. + - Improve deprecation message. + - Retry to retrieve the layer metadata up to 5 times for `docker pull`. + - Only unshare the mount namespace for execin. + - Merge existing config when committing. + - Disable daemon startup timeout. + - Fix issue #4681: add loopback interface when networking is disabled. + - Add failing test case for issue #4681. + - Send SIGTERM to child, instead of SIGKILL. + - Show the driver and the kernel version in `docker info` even when not in debug mode. + - Always symlink /dev/ptmx for libcontainer. This fixes console related problems. + - Fix issue caused by the absence of /etc/apparmor.d. + - Don't leave empty cidFile behind when failing to create the container. + - Mount cgroups automatically if they're not mounted already. + - Use mock for search tests. + - Update to double-dash everywhere. + - Move .dockerenv parsing to lxc driver. + - Move all bind-mounts in the container inside the namespace. + - Don't use separate bind mount for container. + - Always symlink /dev/ptmx for libcontainer. + - Don't kill by pid for other drivers. + - Add initial logging to libcontainer. + - Sort by port in `docker ps`. + - Move networking drivers into runtime top level package. + - Add --no-prune to `docker rmi`. + - Add time since exit in `docker ps`. + - graphdriver: add build tags. + - Prevent allocation of previously allocated ports and improve port allocation. + - Add support for --since/--before in `docker ps`. + - Clean up container stop. + - Add support for configurable dns search domains. + - Add support for relative WORKDIR instructions. + - Add --output flag for docker save. + - Remove duplication of DNS entries in config merging. + - Add cpuset.cpus to cgroups and native driver options. + - Remove docker-ci. + - Promote btrfs.
btrfs is no longer considered experimental. + - Add --input flag to `docker load`. + - Return error when existing bridge doesn't match IP address. + - Strip comments before parsing line continuations to avoid interpreting instructions as comments. + - Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces. + - Add systemd implementation of cgroups and make containers show up as systemd units. + - Fix commit and import when no repository is specified. + - Remount /var/lib/docker as --private to fix scaling issue. + - Use the environment's proxy when pinging the remote registry. + - Reduce error level from harmless errors. + - Allow --volumes-from to be individual files. + - Fix expanding buffer in StdCopy. + - Set error regardless of attach or stdin. This fixes #3364. + - Add support for --env-file to load environment variables from files. + - Symlink /etc/mtab and /proc/mounts. + - Allow pushing a single tag. + - Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM. + - Don't throw error when starting an already running container. + - Fix dynamic port allocation limit. + - remove setupDev from libcontainer. + - Add API version to `docker version`. + - Return correct exit code when receiving signal and make SIGQUIT quit without cleanup. + - Fix --volumes-from mount failure. + - Allow non-privileged containers to create device nodes. + - Skip login tests because of external dependency on a hosted service. + - Deprecate `docker images --tree` and `docker images --viz`. + - Deprecate `docker insert`. + - Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04. + - Add specific error message when hitting 401 over HTTP on push. + - Fix absolute volume check. + - Remove volumes-from from the config. + - Move DNS options to hostconfig. + - Update the apparmor profile for libcontainer. + - Add deprecation notice for `docker commit -run`. + +------------------------------------------------------------------- +Wed Mar 26 16:47:03 UTC 2014 - fcastelli@suse.com + +- Updated to 0.9.1: + * Builder: + - Fix printing multiple messages on a single line. Fixes broken output during builds. + * Remote API: + - Fix content-type detection in `docker cp`. + * Runtime: + - Use BSD raw mode on Darwin. Fixes nano, tmux and others. + - Only unshare the mount namespace for execin. + - Retry to retrieve the layer metadata up to 5 times for `docker pull`. + - Merge existing config when committing. + - Fix panic in monitor. + - Disable daemon startup timeout. + - Fix issue #4681: add loopback interface when networking is disabled. + - Add failing test case for issue #4681. + - Send SIGTERM to child, instead of SIGKILL. + - Show the driver and the kernel version in `docker info` even when not in debug mode. + - Always symlink /dev/ptmx for libcontainer. This fixes console related problems. + - Fix issue caused by the absence of /etc/apparmor.d. + - Don't leave empty cidFile behind when failing to create the container. + - Improve deprecation message. + - Fix attach exit on darwin. + - devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping). + - devicemapper: succeed immediately when removing non-existing devices. + - devicemapper: increase timeout in waitClose to 10 seconds. + - Remove goroutine leak on error. + - Update parseLxcInfo to comply with new lxc1.0 format. 
+ +------------------------------------------------------------------- +Tue Mar 25 21:06:35 UTC 2014 - fcastelli@suse.com + +- Updated to docker 0.9.0: + * Builder: + - Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build. + - Add error to docker build --rm. This adds missing error handling. + - Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers. + - Make `--rm` the default for `docker build`. + * Remote API: + - Move code specific to the API to the api package. + - Fix header content type for the API. Makes all endpoints use proper content type. + - Fix registry auth & remove ping calls from CmdPush and CmdPull. + - Add newlines to the JSON stream functions. + * Runtime: + - Do not ping the registry from the CLI. All requests to registries flow through the daemon. + - Check for nil information return in the lxc driver. This fixes panics with older lxc versions. + - Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. + - Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. + - Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks. + - Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal. + - Ensure docker cp stream is closed properly. Fixes problems with files not being copied by `docker cp`. + - Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp. + - Fix `--run` in `docker commit`. This makes `docker commit --run` work again. + - Fix custom bridge related options. This makes custom bridges work again. + - Mount-bind the PTY as container console. This allows tmux/screen to run. + - Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel. + - Add native exec driver which uses libcontainer and make it the default exec driver. + - Add support for handling extended attributes in archives. + - Set the container MTU to be the same as the host MTU. + - Add simple sha256 checksums for layers to speed up `docker push`. + - Improve kernel version parsing. + - Allow flag grouping (`docker run -it`). + - Remove chroot exec driver. + - Fix divide by zero to fix panic. + - Rewrite `docker rmi`. + - Fix docker info with lxc 1.0.0. + - Fix fedora tty with apparmor. + - Don't always append env vars, replace defaults with vars from config. + - Fix a goroutine leak. + - Switch to Go 1.2.1. + - Fix unique constraint error checks. + - Handle symlinks for Docker's data directory and for TMPDIR. + - Add deprecation warnings for flags (-flag is deprecated in favor of --flag) + - Add apparmor profile for the native execution driver. + - Move system specific code from archive to pkg/system. + - Fix duplicate signal for `docker run -i -t` (issue #3336). + - Return correct process pid for lxc. + - Add a -G option to specify the group which unix sockets belong to. + - Add `-f` flag to `docker rm` to force removal of running containers. + - Kill ghost containers and restart all ghost containers when the docker daemon restarts. + - Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk. +- Updated requirements according to 0.9.0 release notes.
+ +------------------------------------------------------------------- +Wed Feb 19 08:35:27 UTC 2014 - fcastelli@suse.com + + - Updated to Docker 0.8.1 + * Builder: + - Avoid extra mount/unmount during build. This removes an unneeded + mount/unmount operation which was causing problems with devicemapper + - Fix regression with ADD of tar files. This stops Docker from + decompressing tarballs added via ADD from the local file system + - Add error to `docker build --rm`. This adds a missing error check to + ensure failures to remove containers are detected and reported + * Remote API: + - Fix broken images API for version less than 1.7 + - Use the right encoding for all API endpoints which return JSON + - Move remote api client to api/ + - Queue calls to the API using generic socket wait + * Runtime: + - Fix the use of custom settings for bridges and custom bridges + - Refactor the devicemapper code to avoid many mount/unmount race + conditions and failures + - Remove two panics which could make Docker crash in some situations + - Don't ping registry from the CLI client + - Enable skip_block_zeroing for devicemapper. This stops devicemapper from + always zeroing entire blocks + - Fix --run in `docker commit`. This makes docker commit store `--run` + in the image configuration + - Remove directory when removing devicemapper device. This cleans up + leftover mount directories + - Drop NET_ADMIN capability for non-privileged containers. Unprivileged + containers can't change their network configuration + - Ensure `docker cp` stream is closed properly + - Avoid extra mount/unmount during container registration. This removes + an unneeded mount/unmount operation which was causing problems with + devicemapper + - Stop allowing tcp:// as a default tcp bind address which binds to + 127.0.0.1:4243 and remove the default port + - Mount-bind the PTY as container console. This allows tmux and screen to + run in a container + - Clean up archive closing. This fixes and improves archive handling + - Fix engine tests on systems where temp directories are symlinked + - Add test methods for save and load + - Avoid temporarily unmounting the container when restarting it. This + fixes a race for devicemapper during restart + - Support submodules when building from a GitHub repository + - Quote volume path to allow spaces + - Fix remote tar ADD behavior. This fixes a regression which was + causing Docker to extract tarballs + +------------------------------------------------------------------- +Thu Feb 13 09:07:39 UTC 2014 - fcastelli@suse.com + +- Ensure lxc >= 1.0 is not installed on the system; this version is + not compatible with docker yet. + +------------------------------------------------------------------- +Thu Feb 6 08:48:22 UTC 2014 - fcastelli@suse.com + +- Updated to docker 0.8.0: + * Images and containers can be removed much faster + * Building an image from source with docker build is now much faster + * The Docker daemon starts and stops much faster + * The memory footprint of many common operations has been reduced, by + streaming files instead of buffering them in memory, fixing memory leaks, + and fixing various suboptimal memory allocations + * Several race conditions were fixed, making Docker more stable under very + high concurrency load. This makes Docker more stable and less likely to + crash and reduces the memory footprint of many common operations + * All packaging operations are now built on the Go language’s standard tar + implementation, which is bundled with Docker itself.
This makes packaging + more portable across host distributions, and solves several issues caused + by quirks and incompatibilities between different distributions of tar + * Docker can now create, remove and modify larger numbers of containers and + images gracefully thanks to more aggressive releasing of system resources. + For example the storage driver API now allows Docker to do reference + counting on mounts created by the drivers. With the ongoing changes to the + networking and execution subsystems of docker, testing these areas has been + a focus of the refactoring. By moving these subsystems into separate + packages we can test, analyze, and monitor coverage and quality of these + packages + * The Docker daemon supports systemd socket activation + * Docker now ships with an experimental storage driver which uses the BTRFS + filesystem for copy-on-write + * The ADD instruction now supports caching, which avoids unnecessarily + re-uploading the same source content again and again when it hasn’t changed + * The new ONBUILD instruction adds to your image a “trigger” instruction to be + executed at a later time, when the image is used as the base for another + build + * Many components have been separated into smaller sub-packages, each with a + dedicated test suite. As a result the code is better-tested, more readable + and easier to change + * Docker is officially supported on Mac OSX + +------------------------------------------------------------------- +Fri Jan 31 18:14:09 UTC 2014 - f_koch@gmx.de + +- Fix udev file name + +------------------------------------------------------------------- +Sat Jan 25 14:04:50 UTC 2014 - fcastelli@suse.com + +- Added again the patch which forces the docker binary to look for the + dockerinit file in the right location. Docker's official build system + is still buggy. + +------------------------------------------------------------------- +Sat Jan 25 11:05:42 UTC 2014 - fcastelli@suse.com + + - Updated to 0.7.6: + * Builder: + - Do not follow symlink outside of build context + * Runtime: + - Remount bind mounts when ro is specified + - Use https for fetching docker version + * Other: + - Inline the test.docker.io fingerprint + - Add ca-certificates to packaging documentation + - rpm changes: + * remove patch which forced docker to look for the dockerinit binary in + /usr/lib64/docker. Docker's build system now accepts an environment + variable to address this issue. + * install udev rules inside of /usr/lib/udev as requested by rpmlint. + +------------------------------------------------------------------- +Fri Jan 10 10:44:23 UTC 2014 - fcastelli@suse.com + + - Updated to 0.7.5: + * Builder: + - Disable compression for build. More space usage but a much faster upload + - Fix ADD caching for certain paths + - Do not compress archive from git build + * Documentation: + * Fix error in GROUP add example + * Make sure the GPG fingerprint is inline in the documentation + * Give more specific advice on setting up signing of commits for DCO + * Runtime: + * Fix misspelled container names + * Do not add hostname when networking is disabled + * Return most recent image from the cache by date + * Return all errors from docker wait + * Add Content-Type Header "application/json" to GET /version and /info responses + * Other: + - Update DCO to version 1.1 + - Update Makefile to use "docker:GIT_BRANCH" as the generated image name + - Update Travis to check for new 1.1 DCO version + - 0.7.4 changes: + * Builder: + - Fix ADD caching issue with .
prefixed path + - Fix docker build on devicemapper by reverting sparse file tar option + - Fix issue with file caching and prevent wrong cache hit + - Use same error handling while unmarshalling CMD and ENTRYPOINT + * Documentation: + - Simplify and streamline Amazon Quickstart + - Install instructions use unprefixed fedora image + - Update instructions for mtu flag for Docker on GCE + - Add Ubuntu Saucy to installation + - Fix for wrong version warning on master instead of latest + * Runtime: + - Only get the image's rootfs when we need to calculate the image size + - Correctly handle unmapping UDP ports + - Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build + - Fix login message to say pull instead of push + - Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN + - Make blank -H option default to the same as no -H was sent + - Extract cgroups utilities to own submodule + * Other: + - Add Travis CI configuration to validate DCO and gofmt requirements + - Add Developer Certificate of Origin Text + - Upgrade VBox Guest Additions + - Check standalone header when pinging a registry server + +------------------------------------------------------------------- +Tue Jan 7 12:48:30 UTC 2014 - fcastelli@suse.com + +- Spec file cleanup: removed useless SUSE version checks around bash and zsh + completion sub packages. +- Updated runtime dependencies according to what is reported by the official + documentation. + +------------------------------------------------------------------- +Tue Jan 7 08:26:37 UTC 2014 - fcastelli@suse.com + + - Updated to 0.7.3: + * Builder: + - Update ADD to use the image cache, based on a hash of the added content + - Add error message for empty Dockerfile + * Documentation: + - Fix outdated link to the "Introduction" on www.docker.io + - Update the docs to get wider when the screen does + - Add information about needing to install LXC when using raw binaries + - Update Fedora documentation to disentangle the docker and docker.io conflict + - Add a note about using the new `-mtu` flag in several GCE zones + - Add FrugalWare installation instructions + - Add a more complete example of `docker run` + - Fix API documentation for creating and starting Privileged containers + - Add missing "name" parameter documentation on "/containers/create" + - Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration + - Update the 1.8 API documentation with some additions that were added to the docs for 1.7 + * Hack: + - Add missing libdevmapper dependency to the packagers documentation + - Update minimum Go requirement to a hard line at Go 1.2+ + - Many minor improvements to the Vagrantfile + - Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location) + - Add coverprofile generation reporting + - Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually + - Update Dockerfile to be more canonical and have less spurious warnings during build + - Fix some miscellaneous `docker pull` progress bar display issues + - Migrate more miscellaneous packages under the "pkg" folder + - Update TextMate highlighting to automatically be enabled for files named "Dockerfile" + - Reorganize syntax highlighting files under a common "contrib/syntax" directory + - Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the
Ubuntu/Debian installation + - Add support for container names in bash completion + * Packaging: + - Add an official Docker client binary for Darwin (Mac OS X) + - Remove empty "Vendor" string and added "License" on deb package + - Add a stubbed version of "/etc/default/docker" in the deb package + * Runtime: + - Update layer application to extract tars in place, avoiding file churn while handling whiteouts + - Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision) + - Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`) + - Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions + - Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files + - Update container name validation to include '.' + - Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected + - Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler + - Update to use proper box-drawing characters everywhere in `docker images -tree` + - Move MTU setting from LXC configuration to directly use netlink + - Add `-S` option to external tar invocation for more efficient sparse file handling + - Add arch/os info to User-Agent string, especially for registry requests + - Add `-mtu` option to Docker daemon for configuring MTU + - Fix `docker build` to exit with a non-zero exit code on error + - Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation + +------------------------------------------------------------------- +Wed Dec 18 08:35:14 UTC 2013 - fcastelli@suse.com + +- Removed docker.init file from OBS; it's no longer needed since we + moved to systemd. + +------------------------------------------------------------------- +Tue Dec 17 17:25:47 UTC 2013 - fcastelli@suse.com + +- Required git-core rather than the full package chain. + +------------------------------------------------------------------- +Tue Dec 17 10:59:08 UTC 2013 - fcastelli@suse.com + +- Fixed openSUSE 12.3 package by adding explicit requirement of + systemd-devel package at build time.
+ +------------------------------------------------------------------- +Tue Dec 17 10:09:04 UTC 2013 - fcastelli@suse.com + +- Updated to docker 0.7.2: + * Runtime: + - Validate container names on creation with standard regex + - Increase maximum image depth to 127 from 42 + - Continue to move api endpoints to the job api + - Add -bip flag to allow specification of dynamic bridge IP via CIDR + - Allow bridge creation when ipv6 is not enabled on certain systems + - Set hostname and IP address from within dockerinit + - Drop capabilities from within dockerinit + - Fix volumes on host when symlink is present in the image + - Prevent deletion of image if ANY container is depending on it even if the container is not running + - Update docker push to use new progress display + - Use os.Lstat to allow mounting unix sockets when inspecting volumes + - Adjusted handling of inactive user login + - Add missing defines in devicemapper for older kernels + - Allow untag operations with no container validation + - Add auth config to docker build + * Documentation: + - Add more information about Docker logging + - Add RHEL documentation + - Add a direct example for changing the CMD that is run in a container + - Update Arch installation documentation + - Add section on Trusted Builds + - Add Network documentation page + * Other: + - Add new cover bundle for providing code coverage reporting + - Separate integration tests in bundles + - Make Tianon the hack maintainer + - Update mkimage-debootstrap with more tweaks for keeping images small + - Use https to get the install script + - Remove vendored dotcloud/tar now that Go 1.2 has been released +- Marked /etc/sysctl.d/200-docker.conf as a configuration file within the spec file. +- Added 'ca-certificates-cacert' as a runtime dependency; this is required to pull + containers from docker's official repository. + +------------------------------------------------------------------- +Thu Dec 12 08:41:30 UTC 2013 - fcastelli@suse.com + +- Removed dnsmasq dependency +- Added GNU tar as an explicit dependency +- Moved to systemd +- Updated to docker 0.7.1: + * Add @SvenDowideit as documentation maintainer + * Add links example + * Add documentation regarding ambassador pattern + * Add Google Cloud Platform docs + * Add dockerfile best practices + * Update doc for RHEL + * Update doc for registry + * Update Postgres examples + * Update doc for Ubuntu install + * Improve remote api doc +- modified patches: + * 0001-Allowed-installation-of-dockerinit-into-usr-lib64.patch: changed + to apply against the updated codebase. + +------------------------------------------------------------------- +Thu Nov 28 10:18:12 UTC 2013 - fcastelli@suse.com + +- Updated runtime dependencies according to docker's official guidelines. + +------------------------------------------------------------------- +Thu Nov 28 09:25:05 UTC 2013 - fcastelli@suse.com + +- Fixed packaging errors: + * dockerinit binary was not built, causing docker to be unusable. + * added custom rpmlint rules. + +------------------------------------------------------------------- +Tue Nov 26 15:59:38 UTC 2013 - fcastelli@suse.com + + * rpm changes: + * no longer require an AUFS-capable kernel at runtime. + * build docker using in-tree dependencies provided by upstream. + * created zsh completion sub-package. + + * 0.7.0 (2013-11-25) + - Storage drivers: choose from aufs, device mapper, vfs or btrfs. + - Standard Linux support: docker now runs on unmodified linux kernels and all major distributions.
+ - Links: compose complex software stacks by connecting containers to each other. + - Container naming: organize your containers by giving them memorable names. + - Advanced port redirects: specify port redirects per interface, or keep sensitive ports private. + - Offline transfer: push and pull images to the filesystem without losing information. + - Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage. + + * 0.6.7 (2013-11-21) + - Improved stability, fixes some race conditions + - Skip the volumes mounted when deleting the volumes of a container. + - Fix layer size computation: handle hard links correctly + - Use the work Path for docker cp CONTAINER:PATH + - Fix tmp dir never cleanup + - Speedup docker ps + - More informative error message on name collisions + - Fix nameserver regex + - Always return long IDs + - Fix container restart race condition + - Keep published ports on docker stop; docker start + - Fix container networking on Fedora + - Correctly express "any address" to iptables + - Fix network setup when reconnecting to ghost container + - Prevent deletion if image is used by a running container + - Lock around read operations in graph + - remote API: return full ID on docker rmi + - client: + - Add -tree option to images + - Offline image transfer + - Exit with status 2 on usage error and display usage on stderr + - Do not forward SIGCHLD to container + - Use string timestamp for docker events -since + + * 0.6.6 (2013-11-06) + - Ensure container name on register + - Fix regression in /etc/hosts + - Add lock around write operations in graph + - Check if port is valid + - Fix restart runtime error with ghost container networking + - Added some more colors and animals to increase the pool of generated names + - Fix issues in docker inspect + - Escape apparmor confinement + - Set environment variables using a file.
+ - Prevent docker insert to erase something + - Prevent DNS server conflicts in CreateBridgeIface + - Validate bind mounts on the server side + - Use parent image config in docker build + - Fix regression in /etc/hosts + - Client: + * Add -P flag to publish all exposed ports + * Add -notrunc and -q flags to docker history + * Fix docker commit, tag and import usage + * Add stars, trusted builds and library flags in docker search + * Fix docker logs with tty + - RemoteAPI: + * Make /events API send headers immediately + * Do not split last column docker top + + Add size to history + + * 0.6.5 (2013-10-29) + - Containers can now be named + - Containers can now be linked together for service discovery + - 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors + - Automatically start crashed containers after a reboot + - Expose IP, port, and proto as separate environment vars for container links + - Allow ports to be published to specific ips + - Prohibit inter-container communication by default + - Ignore ErrClosedPipe for stdin in Container.Attach + - Remove unused field kernelVersion + - Fix issue when mounting subdirectories of /mnt in container + - Fix untag during removal of images + - Check return value of syscall.Chdir when changing working directory inside dockerinit + - Client: + - Only pass stdin to hijack when needed to avoid closed pipe errors + - Use less reflection in command-line method invocation + - Monitor the tty size after starting the container, not prior + - Remove useless os.Exit() calls after log.Fatal + - Documentation: Fix the flags for nc in example + - Testing: Remove warnings and prevent mount issues + - Testing: Change logic for tty resize to avoid warning in tests + - Builder: Fix race condition in docker build with verbose output + - Registry: Fix content-type for PushImageJSONIndex method + - Contrib: Improve helper tools to generate debian and Arch linux server images + + * 0.6.4 (2013-10-16) + - Add cleanup of container when Start() fails + - Add better comments to utils/stdcopy.go + - Add utils.Errorf for error logging + - Add -rm to docker run for removing a container on exit + - Remove error messages which are not actually errors + - Fix `docker rm` with volumes + - Fix some error cases where a HTTP body might not be closed + - Fix panic with wrong dockercfg file + - Fix the attach behavior with -i + - Record termination time in state. + - Use empty string so TempDir uses the OS's temp dir automatically + - Make sure to close the network allocators + - Autorestart containers by default + - Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)` + - lxc: Allow set_file_cap capability in container + - Move run -rm to the cli only + - Split stdout stderr + - Always create a new session for the container + - Builder: Abort build if mergeConfig returns an error and fix duplicate error message + - Packaging: Remove deprecated packaging directory + - Registry: Use correct auth config when logging in. 
+ - Registry: Fix the error message so it is the same as the regex
+
+-------------------------------------------------------------------
+Wed Oct 2 12:04:09 UTC 2013 - fcastelli@suse.com
+
+ * 0.6.3 (2013-09-23)
+
+ - Client: Fix detach issue
+ - Runtime: Only copy and change permissions on non-bindmount volumes
+ - Registry: Update regular expression to match index
+ - Runtime: Allow multiple volumes-from
+ - Packaging: Download apt key over HTTPS
+ - Documentation: Update section on extracting the docker binary after build
+ - Documentation: Update development environment docs for new build process
+ - Documentation: Remove 'base' image from documentation
+ - Packaging: Add 'docker' group on install for ubuntu package
+ - Runtime: Fix HTTP imports from STDIN
+
+-------------------------------------------------------------------
+Thu Sep 26 10:33:21 UTC 2013 - fcastelli@suse.com
+
+- Fixed build on SLE_11_SP3
+
+-------------------------------------------------------------------
+Mon Sep 23 10:17:17 UTC 2013 - fcastelli@suse.com
+
+- Fixed git commit version: the wrong version was shown by 'docker version'.
+
+-------------------------------------------------------------------
+Mon Sep 23 09:56:42 UTC 2013 - fcastelli@suse.com
+
+ * 0.6.2 (2013-09-17)
+
+ Hack: Vendor all dependencies
+ Builder: Add -rm option in order to remove intermediate containers
+ Runtime: Add domainname support
+ Runtime: Implement image filtering with path.Match
+ Builder: Allow multiline for the RUN instruction
+ Runtime: Remove unnecessary warnings
+ Runtime: Only mount the hostname file when the config exists
+ Runtime: Handle signals within the docker login command
+ Runtime: Remove os/user dependency
+ Registry: Implement login with private registry
+ Remote API: Bump to v1.5
+ Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc.
+ Documentation: General improvements
+ Runtime: UID and GID are now also applied to volumes
+ Runtime: docker start sets an error code upon error
+ Runtime: docker run sets the same error code as the process started
+ Registry: Fix push issues
+
+
+-------------------------------------------------------------------
+Mon Aug 26 14:22:34 UTC 2013 - fcastelli@suse.com
+
+ * 0.6.1 (2013-08-23)
+
+ Registry: Pass "meta" headers in API calls to the registry
+ Packaging: Use correct upstart script with new build tool
+ Packaging: Use libffi-dev, don't build it from sources
+ Packaging: Removed duplicate mercurial install command
+
+ * 0.6.0 (2013-08-22)
+
+ Runtime: Load authConfig only when needed and fix useless WARNING
+ Runtime: Add lxc-conf flag to allow custom lxc options
+ Runtime: Fix race conditions in parallel pull
+ Runtime: Improve CMD, ENTRYPOINT, and attach docs.
+ Documentation: Small fix to docs regarding adding docker groups
+ Documentation: Add MongoDB image example
+ Builder: Add USER instruction to Dockerfile
+ Documentation: updated default -H docs
+ Remote API: Sort Images by most recent creation date.
+ Builder: Add workdir support for the Buildfile
+ Runtime: Add an option to set the working directory
+ Runtime: Show tag used when image is missing
+ Documentation: Update readme with dependencies for building
+ Documentation: Add instructions for creating and using the docker group
+ Remote API: Reworking opaque requests in registry module
+ Runtime: Fix Graph ByParent() to generate list of child images per parent image.
+ Runtime: Add Image name to LogEvent tests
+ Documentation: Add sudo to examples and installation to documentation
+ Hack: Bash Completion: Limit commands to containers of a relevant state
+ Remote API: Add image name in /events
+ Runtime: Apply volumes-from before creating volumes
+ Runtime: Make docker run handle SIGINT/SIGTERM
+ Runtime: Prevent crash when .dockercfg not readable
+ Hack: Add docker dependencies coverage testing into docker-ci
+ Runtime: Add -privileged flag and relevant tests, docs, and examples
+ Packaging: Docker-brew 0.5.2 support and memory footprint reduction
+ Runtime: Install script should be fetched over https, not http.
+ Packaging: Add new docker dependencies into docker-ci
+ Runtime: Use Go 1.1.2 for dockerbuilder
+ Registry: Improve auth push
+ Runtime: API, issue 1471: Use groups for socket permissions
+ Documentation: PostgreSQL service example in documentation
+ Contrib: bash completion script
+ Tests: Improve TestKillDifferentUser to prevent timeout on buildbot
+ Documentation: Fix typo in docs for docker run -dns
+ Documentation: Adding a reference to ps -a
+ Runtime: Correctly detect IPv4 forwarding
+ Packaging: Revert "docker.upstart: avoid spawning a sh process"
+ Runtime: Use ranged for loop on channels
+ Runtime: Fix typo: fmt.Sprint -> fmt.Sprintf
+ Tests: Fix typo in TestBindMounts (runContainer called without image)
+ Runtime: add websocket support to /container//attach/ws
+ Runtime: Mount /dev/shm as a tmpfs
+ Builder: Only count known instructions as build steps
+ Builder: Fix docker build and docker events output
+ Runtime: switch from http to https for get.docker.io
+ Tests: Improve TestGetContainersTop so it does not rely on sleep
+ Packaging: Docker-brew and Docker standard library
+ Testing: Add some tests in server and utils
+ Packaging: Release docker with docker
+ Builder: Make sure the ENV instruction within a build performs a commit each time
+ Packaging: Fix the upstart script generated by get.docker.io
+ Runtime: fix small \n error in docker build
+ Runtime: Let userland proxy handle container-bound traffic
+ Runtime: Updated the Docker CLI to specify a value for the "Host" header.
+ Runtime: Add warning when net.ipv4.ip_forwarding = 0
+ Registry: Registry unit tests + mock registry
+ Runtime: fixed #910: print user name in docker info output
+ Builder: Forbid certain paths within docker build ADD
+ Runtime: change network range to avoid conflict with EC2 DNS
+ Tests: Relax the lo interface test to allow iface index != 1
+ Documentation: Suggest installing linux-headers by default.
+ Documentation: Change the Twitter handle
+ Client: Add docker cp command and copy api endpoint to copy container files/folders to the host
+ Remote API: Use mime pkg to parse Content-Type
+ Runtime: Reduce connect and read timeout when pinging the registry
+ Documentation: Update amazon.rst to explain that Vagrant is not necessary for running Docker on EC2
+ Packaging: Enabled the docs to generate manpages.
+ Runtime: Parallel pull
+ Runtime: Handle ip route showing mask-less IP addresses
+ Documentation: Clarify Amazon EC2 installation
+ Documentation: 'Base' image is deprecated and should no longer be referenced in the docs.
+ Runtime: Fix to "Inject dockerinit at /.dockerinit"
+ Runtime: Allow ENTRYPOINT without CMD
+ Runtime: Always consider localhost as a domain name when parsing the FQN repos name
+ Remote API: 650 http utils and user agent field
+ Documentation: fix a typo in the ubuntu installation guide
+ Builder: Repository name (and optionally a tag) in build usage
+ Documentation: Move note about officially supported kernel
+ Packaging: Revert "Bind daemon to 0.0.0.0 in Vagrant".
+ Builder: Add no cache for docker build
+ Runtime: Add hostname to environment
+ Runtime: Add last stable version in docker version
+ Builder: Make sure ADD will create everything in 0755
+ Documentation: Add ufw doc
+ Tests: Add registry functional test to docker-ci
+ Documentation: Solved the logo being squished in Safari
+ Runtime: Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete
+ Runtime: Refactor checksum
+ Runtime: Improve connect message with socket error
+ Documentation: Added information about Docker's high level tools over LXC.
+ Don't read from stdout when only attached to stdin
+
+-------------------------------------------------------------------
+Wed Aug 7 15:11:23 UTC 2013 - fcastelli@suse.com
+
+ * added commits required to get docker working with a private registry.
+
+ * 0.5.1 (2013-07-30)
+
+ API: Docker client now sets useragent (RFC 2616)
+ Runtime: Add ps args to docker top
+ Runtime: Add support for container ID files (pidfile like)
+ Runtime: Add container=lxc in default env
+ Runtime: Support networkless containers with docker run -n and docker -d -b=none
+ API: Add /events endpoint
+ Builder: ADD command now understands URLs
+ Builder: CmdAdd and CmdEnv now respect Dockerfile-set ENV variables
+ Hack: Simplify unit tests with helpers
+ Hack: Improve docker.upstart event
+ Hack: Add coverage testing into docker-ci
+ Runtime: Stdout/stderr logs are now stored in the same file as JSON
+ Runtime: Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3.
+ Runtime: Change .dockercfg format to json and support multiple auth remote
+ Runtime: Do not override volumes from config
+ Runtime: Fix issue with EXPOSE override
+ Builder: Create directories with 755 instead of 700 within ADD instruction
+
+
+-------------------------------------------------------------------
+Thu Jul 25 09:43:48 UTC 2013 - fcastelli@suse.com
+
+ 0.5.0 (2013-07-17)
+
+ Runtime: List all processes running inside a container with 'docker top'
+ Runtime: Host directories can be mounted as volumes with 'docker run -v'
+ Runtime: Containers can expose public UDP ports (e.g. '-p 123/udp')
+ Runtime: Optionally specify an exact public port (e.g. '-p 80:4500')
+ Registry: New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries
+ Builder: ENTRYPOINT instruction sets a default binary entry point to a container
+ Builder: VOLUME instruction marks a part of the container as persistent data
+ Builder: 'docker build' displays the full output of a build by default
+ Runtime: 'docker login' supports additional options
+ Runtime: Don't save a container's hostname when committing an image.
+ Registry: Fix issues when uploading images to a private registry
+
+ 0.4.8 (2013-07-01)
+
+ Builder: New build operation ENTRYPOINT adds an executable entry point to the container.
+ Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID.
+ Tests: Fix issues in the test suite
+
+ 0.4.7 (2013-06-28)
+
+ Registry: easier push/pull to a custom registry
+ Remote API: the progress bar updates faster when downloading and uploading large files
+ Remote API: fix a bug in the optional unix socket transport
+ Runtime: improve detection of kernel version
+ Runtime: host directories can be mounted as volumes with 'docker run -b'
+ Runtime: fix an issue when only attaching to stdin
+ Runtime: use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts
+ Hack: improve test suite and dev environment
+ Hack: remove dependency of unit tests on 'os/user'
+ Documentation: add terminology section
+
+ 0.4.6 (2013-06-22)
+
+ Runtime: fix a bug which caused creation of empty images (and volumes) to crash.
+
+ 0.4.5 (2013-06-21)
+
+ Builder: 'docker build git://URL' fetches and builds a remote git repository
+ Runtime: 'docker ps -s' optionally prints container size
+ Tests: Improved and simplified
+ Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail.
+ Builder: fix a regression when using ADD with a single regular file.
+
+ 0.4.4 (2013-06-19)
+
+ Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients.
+
+ 0.4.3 (2013-06-19)
+
+ Builder: ADD of a local file will detect tar archives and unpack them
+ Runtime: Remove bsdtar dependency
+ Runtime: Add unix socket and multiple -H support
+ Runtime: Prevent rm of running containers
+ Runtime: Use go1.1 cookiejar
+ Builder: ADD improvements: use tar for copy + automatically unpack local archives
+ Builder: ADD uses tar/untar for copies instead of calling 'cp -ar'
+ Builder: nicer output for 'docker build'
+ Builder: fixed the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented.
+ Client: HumanReadable ProgressBar sizes in pull
+ Client: Fix docker version's git commit output
+ API: Send all tags on History API call
+ API: Add tag lookup to history command. Fixes #882
+ Runtime: Fix issue detaching from running TTY container
+ Runtime: Forbid parallel push/pull for a single image/repo. Fixes #311
+ Runtime: Fix race condition within Run command when attaching.
+ Builder: fix a bug which caused builds to fail if ADD was the first command
+ Documentation: fix missing command in irc bouncer example
+
+ 0.4.2 (2013-06-17)
+
+ Packaging: Bumped version to work around an Ubuntu bug
+
+ 0.4.1 (2013-06-17)
+
+ Remote API: Add flag to enable cross domain requests
+ Remote API/Client: Add images and containers sizes in docker ps and docker images
+ Runtime: Configure dns configuration host-wide with 'docker -d -dns'
+ Runtime: Detect faulty DNS configuration and replace it with a public default
+ Runtime: allow docker run :
+ Runtime: you can now specify public port (ex: -p 80:4500)
+ Client: allow multiple params in inspect
+ Client: Print the container id before the hijack in docker run
+ Registry: add regexp check on repo's name
+ Registry: Move auth to the client
+ Runtime: improved image removal to garbage-collect unreferenced parents
+ Vagrantfile: Add the REST API port to vagrantfile's port_forward
+ Upgrade to Go 1.1
+ Builder: don't ignore last line in Dockerfile when it doesn't end with \n
+ Registry: Remove login check on pull
+
+ 0.4.0 (2013-06-03)
+
+ Introducing Builder: 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
+ Introducing Remote API: control Docker programmatically using a simple HTTP/json API
+ Runtime: various reliability and usability improvements
+
+ 0.3.4 (2013-05-30)
+
+ Builder: 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
+ Builder: 'docker build -t FOO' applies the tag FOO to the newly built container.
+ Runtime: interactive TTYs correctly handle window resize
+ Runtime: fix how configuration is merged between layers
+ Remote API: split stdout and stderr on 'docker run'
+ Remote API: optionally listen on a different IP and port (use at your own risk)
+ Documentation: improved install instructions.
+
+ 0.3.3 (2013-05-23)
+
+ Registry: Fix push regression
+ Various bugfixes
+
+ 0.3.2 (2013-05-09)
+
+ Runtime: Store the actual archive on commit
+ Registry: Improve the checksum process
+ Registry: Use the size to have a good progress bar while pushing
+ Registry: Use the actual archive if it exists in order to speed up the push
+ Registry: Fix error 400 on push
+
+ 0.3.1 (2013-05-08)
+
+ Builder: Implement the autorun capability within docker builder
+ Builder: Add caching to docker builder
+ Builder: Add support for docker builder with native API as top level command
+ Runtime: Add go version to debug infos
+ Builder: Implement ENV within docker builder
+ Registry: Add docker search top level command in order to search a repository
+ Images: output graph of images to dot (graphviz)
+ Documentation: new introduction and high-level overview
+ Documentation: Add the documentation for docker builder
+ Website: new high-level overview
+ Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc
+ Images: fix ByParent function
+ Builder: Check command existence prior to create and add unit tests for the case
+ Registry: Fix pull for official images with specific tag
+ Registry: Fix issue when logging in with a different user and trying to push
+ Documentation: CSS fix for docker documentation to make REST API docs look better.
+ Documentation: Fixed CouchDB example page header mistake
+ Documentation: fixed README formatting
+ Registry: Improve checksum - async calculation
+ Runtime: kernel version - don't show the dash if flavor is empty
+ Documentation: updated www.docker.io website.
+ Builder: use any whitespaces instead of tabs
+ Packaging: packaging ubuntu; issue #510: Use golang-stable PPA package to build docker
+
+
+-------------------------------------------------------------------
+Tue May 7 09:09:34 UTC 2013 - fcastelli@suse.com
+
+* Update to 0.3.0 (2013-05-06)
+ - Registry: Implement the new registry
+ - Documentation: new example: sharing data between 2 couchdb databases
+ - Runtime: Fix the command existence check
+ - Runtime: strings.Split may return an empty string on no match
+ - Runtime: Fix an index out of range crash if cgroup memory is not
+ - Documentation: Various improvements
+ - Vagrant: Use only one deb line in /etc/apt
+
+-------------------------------------------------------------------
+Mon May 6 16:00:00 UTC 2013 - fcastelli@suse.com
+
+- Update to version 0.2.2
+
+ * 0.2.2 (2013-05-03)
+ - Support for data volumes ('docker run -v=PATH')
+ - Share data volumes between containers ('docker run -volumes-from')
+ - Improved documentation
+ - Upgrade to Go 1.0.3
+ - Various upgrades to the dev environment for contributors
+
+ * 0.2.1 (2013-05-01)
+ - 'docker commit -run' bundles a layer with default runtime options: command, ports etc.
+ - Improve install process on Vagrant
+ - New Dockerfile operation: "maintainer"
+ - New Dockerfile operation: "expose"
+ - New Dockerfile operation: "cmd"
+ - Contrib script to build a Debian base layer
+ - 'docker -d -r': restart crashed containers at daemon startup
+ - Runtime: improve test coverage
+
+ * 0.2.0 (2013-04-23)
+ - Runtime: ghost containers can be killed and waited for
+ - Documentation: update install instructions
+ - Packaging: fix Vagrantfile
+ - Development: automate releasing binaries and ubuntu packages
+ - Add a changelog
+ - Various bugfixes
+
+ * 0.1.8 (2013-04-22)
+ - Dynamically detect cgroup capabilities
+ - Issue stability warning on kernels <3.8
+ - 'docker push' buffers on disk instead of memory
+ - Fix 'docker diff' for removed files
+ - Fix 'docker stop' for ghost containers
+ - Fix handling of pidfile
+ - Various bugfixes and stability improvements
+
+ * 0.1.7 (2013-04-18)
+ - Container ports are available on localhost
+ - 'docker ps' shows allocated TCP ports
+ - Contributors can run 'make hack' to start a continuous integration VM
+ - Streamline ubuntu packaging & uploading
+ - Various bugfixes and stability improvements
+
+ * 0.1.6 (2013-04-17)
+ - Record the author of an image with 'docker commit -author'
+
+ * 0.1.5 (2013-04-17)
+ - Disable standalone mode
+ - Use a custom DNS resolver with 'docker -d -dns'
+ - Detect ghost containers
+ - Improve diagnosis of missing system capabilities
+ - Allow disabling memory limits at compile time
+ - Add debian packaging
+ - Documentation: installing on Arch Linux
+ - Documentation: running Redis on docker
+ - Fixed lxc 0.9 compatibility
+ - Automatically load aufs module
+ - Various bugfixes and stability improvements
+
+ * 0.1.4 (2013-04-09):
+ - Full support for TTY emulation
+ - Detach from a TTY session with the escape sequence C-p C-q
+ - Various bugfixes and stability improvements
+ - Minor UI improvements
+ - Automatically create our own bridge interface 'docker0'
+
+
+-------------------------------------------------------------------
+Wed Apr 10 10:31:11 UTC 2013 - fcastelli@suse.com
+
+- Apply patch that creates pidfile.
+- Update the init script to look for the pidfile under the right location.
+- Update the init script to acknowledge the code taken from Ubuntu's lxc-net script.
+
+-------------------------------------------------------------------
+Tue Apr 9 08:24:33 UTC 2013 - fcastelli@suse.com
+
+- create initial package using version 0.1.3 from git commit 0767916adedb01
diff --git a/docker.service b/docker.service
new file mode 100644
index 0000000..5a27ab0
--- /dev/null
+++ b/docker.service
@@ -0,0 +1,45 @@
+[Unit]
+Description=Docker Application Container Engine
+Documentation=http://docs.docker.com
+After=network.target lvm2-monitor.service firewalld.service
+# We don't use the docker socket activation, but doing this ensures that the
+# docker.socket unit is alive while Docker is (docker.socket has BindsTo, so we
+# only need a weak requirement to make sure starting docker.service also
+# "starts" the socket service). Forcefully stopping docker.socket will not
+# cause docker to die, but there's no nice workaround for that.
+Wants=docker.socket
+
+[Service]
+EnvironmentFile=/etc/sysconfig/docker
+
+# While Docker has support for socket activation (-H fd://), this is not
+# enabled by default because enabling socket activation means that on boot your
+# containers won't start until someone tries to administer the Docker daemon.
+Type=notify
+ExecStart=/usr/bin/dockerd --add-runtime oci=/usr/sbin/runc $DOCKER_NETWORK_OPTIONS $DOCKER_OPTS
+ExecReload=/bin/kill -s HUP $MAINPID
+
+# Having non-zero Limit*s causes performance problems due to accounting overhead
+# in the kernel. We recommend using cgroups to do container-local accounting.
+LimitNOFILE=1048576
+LimitNPROC=infinity
+LimitCORE=infinity
+
+# TasksMax is only supported by systemd 226 and above; on older systemd
+# versions you may need to comment it out.
+TasksMax=infinity
+
+# Set Delegate=yes so that systemd does not reset the cgroups of docker containers.
+# Only systemd 218 and above support this property.
+Delegate=yes
+
+# Kill only the docker process, not all processes in the cgroup.
+KillMode=process
+
+# Restart the docker process if it exits prematurely.
+Restart=on-failure
+StartLimitBurst=3
+StartLimitInterval=60s
+
+[Install]
+WantedBy=multi-user.target
diff --git a/docker.socket b/docker.socket
new file mode 100644
index 0000000..13fe927
--- /dev/null
+++ b/docker.socket
@@ -0,0 +1,18 @@
+[Unit]
+Description=Docker Socket for the API
+# We use BindsTo in order to make sure that you cannot use socket-activation
+# with Docker (Docker must always start at boot if enabled, otherwise
+# containers will not run until some administrator interacts with Docker).
+BindsTo=docker.service
+
+[Socket]
+# If /var/run is not implemented as a symlink to /run, you may need to
+# specify ListenStream=/var/run/docker.sock instead.
+ListenStream=/run/docker.sock
+SocketMode=0660
+SocketUser=root
+SocketGroup=docker
+
+[Install]
+WantedBy=sockets.target
+
diff --git a/docker.spec b/docker.spec
new file mode 100644
index 0000000..b4f200e
--- /dev/null
+++ b/docker.spec
@@ -0,0 +1,417 @@
+#
+# spec file for package docker
+#
+# Copyright (c) 2023 SUSE LLC
+#
+# All modifications and additions to the file contributed by third parties
+# remain the property of their copyright owners, unless otherwise agreed
+# upon. The license for this file, and modifications and additions to the
+# file, is the same license as for the pristine package itself (unless the
+# license for the pristine package is not an Open Source License, in which
+# case the license is the MIT License). An "Open Source License" is a
+# license that conforms to the Open Source Definition (Version 1.9)
+# published by the Open Source Initiative.
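+#
+# Illustrative usage note (an editorial sketch, not consumed by the build):
+# the docker.service and docker.socket units above are designed to be managed
+# as a pair; the socket unit is pulled in automatically, e.g.:
+#   $ systemctl enable --now docker.service   # Wants=docker.socket brings the socket unit up too
+#   $ systemctl status docker.socket docker.service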
+ +# Please submit bugfixes or comments via https://bugs.opensuse.org/ +# +# nodebuginfo + +%bcond_without apparmor + +# Where important update information will be stored, such that an administrator +# is guaranteed to see the relevant warning. +%define update_messages %{_localstatedir}/adm/update-messages/%{name}-%{version}-%{release} + +#Compat macro for new _fillupdir macro introduced in Nov 2017 +%if ! %{defined _fillupdir} + %define _fillupdir /var/adm/fillup-templates +%endif + +# Used when generating the "build" information for Docker version. The value of +# git_commit_epoch is unused here (we use SOURCE_DATE_EPOCH, which rpm +# helpfully injects into our build environment from the changelog). If you want +# to generate a new git_commit_epoch, use this: +# $ date --date="$(git show --format=fuller --date=iso $COMMIT_ID | grep -oP '(?<=^CommitDate: ).*')" '+%s' +%define real_version 26.1.5 +%define git_version 411e817ddf71 +%define git_commit_epoch 1721763388 + +Name: docker +Version: %{real_version}_ce +# This "nice version" is so that docker --version gives a result that can be +# parsed by other people. boo#1182476 +%define nice_version %{real_version}-ce +Release: 0 +Summary: The Moby-project Linux container runtime +License: Apache-2.0 +Group: System/Management +URL: http://www.docker.io +Source: %{name}-%{version}_%{git_version}.tar.xz +Source1: %{name}-cli-%{version}.tar.xz +Source3: docker-rpmlintrc +# TODO: Move these source files to somewhere nicer. +Source100: docker.service +Source101: docker.socket +Source110: 80-docker.rules +Source120: sysconfig.docker +Source130: README_SUSE.md +Source140: docker-audit.rules +Source150: docker-daemon.json +Source160: docker.sysusers +# NOTE: All of these patches are maintained in +# in the suse-v branch. Make sure you update the patches in that +# branch and then git-format-patch the patch here. +# SUSE-FEATURE: Adds the /run/secrets mountpoint inside all Docker containers +# which is not snapshotted when images are committed. +Patch100: 0001-SECRETS-daemon-allow-directory-creation-in-run-secre.patch +Patch101: 0002-SECRETS-SUSE-implement-SUSE-container-secrets.patch +# UPSTREAM: Revert of upstream patch to keep SLE-12 build working. +Patch200: 0003-BUILD-SLE12-revert-graphdriver-btrfs-use-kernel-UAPI.patch +# UPSTREAM: Backport of . +Patch201: 0004-bsc1073877-apparmor-clobber-docker-default-profile-o.patch +# UPSTREAM: Revert of upstream patches to make apparmor work on SLE 12. +Patch202: 0005-SLE12-revert-apparmor-remove-version-conditionals-fr.patch +# UPSTREAM: Backport of and +# . +Patch203: 0006-bsc1221916-update-to-patched-buildkit-version-to-fix.patch +# UPSTREAM: Backport of . +Patch204: 0007-bsc1214855-volume-use-AtomicWriteFile-to-save-volume.patch +# UPSTREAM: Backport of . 
+Patch900: cli-0001-docs-include-required-tools-in-source-tree.patch
+BuildRequires: audit
+BuildRequires: bash-completion
+BuildRequires: ca-certificates
+BuildRequires: device-mapper-devel >= 1.2.68
+BuildRequires: fdupes
+%if %{with apparmor}
+BuildRequires: libapparmor-devel
+%endif
+BuildRequires: libbtrfs-devel >= 3.8
+BuildRequires: libseccomp-devel >= 2.2
+BuildRequires: libtool
+BuildRequires: linux-glibc-devel
+BuildRequires: procps
+BuildRequires: sqlite3-devel
+BuildRequires: zsh
+BuildRequires: fish
+BuildRequires: go-go-md2man
+BuildRequires: pkgconfig(libsystemd)
+BuildRequires: sysuser-tools
+BuildRequires: golang(API) = 1.21
+%if %{with apparmor}
+%if 0%{?sle_version} >= 150000
+# This conditional only works on rpm>=4.13, which SLE 12 doesn't have. But we
+# don't need to support Docker+selinux for SLE 12 anyway.
+Requires: (apparmor-parser or container-selinux)
+# This Recommends is added to make sure that even if you have container-selinux
+# installed you will still be prompted to install apparmor-parser, which Docker
+# requires to apply AppArmor profiles. (For SELinux systems this doesn't matter,
+# but if you switch back to AppArmor on reboot this would result in insecure
+# containers.)
+Recommends: apparmor-parser
+%else
+Requires: apparmor-parser
+%endif
+%else
+Requires: container-selinux
+%endif
+Requires: ca-certificates-mozilla
+# The docker-proxy binary used to be in a separate package. We obsolete it,
+# since now docker-proxy is maintained as part of this package.
+Obsoletes: docker-libnetwork < 0.7.0.2
+Provides: docker-libnetwork = 0.7.0.2.%{version}
+# Required to actually run containers. We require the minimum version that is
+# pinned by Docker, but in order to avoid headaches we allow for updates.
+Requires: runc >= 1.1.9
+Requires: containerd >= 1.7.3
+# Needed for --init support. We don't use "tini", we use our own implementation
+# which handles edge cases better.
+Requires: catatonit
+# Provides mkfs.ext4 - used by Docker when the devicemapper storage driver is used
+Requires: e2fsprogs
+Requires: iproute2 >= 3.5
+Requires: iptables >= 1.4
+Requires: procps
+Requires: tar >= 1.26
+Requires: xz >= 4.9
+%?sysusers_requires
+Requires(post): %fillup_prereq
+Requires(post): udev
+Requires(post): shadow
+# Not necessary, but must be installed when the underlying system is
+# configured to use lvm and the user doesn't explicitly provide a
+# different storage-driver than devicemapper.
+Recommends: lvm2 >= 2.2.89
+Recommends: git-core >= 1.7
+# Required for "docker buildx" support.
+Recommends: %{name}-buildx
+Recommends: %{name}-rootless-extras
+ExcludeArch: s390 ppc
+
+%description
+Docker complements LXC with a high-level API which operates at the process
+level. It runs unix processes with strong guarantees of isolation and
+repeatability across servers.
+
+Docker is a great building block for automating distributed systems:
+large-scale web deployments, database clusters, continuous deployment
+systems, private PaaS, service-oriented architectures, etc.
+
+%package rootless-extras
+Summary: Rootless support for Docker
+Group: System/Management
+Requires: %{name} = %{version}
+Requires: slirp4netns >= 0.4
+Requires: fuse-overlayfs >= 0.7
+Requires: rootlesskit
+BuildArch: noarch
+
+%description rootless-extras
+Rootless support for Docker.
+Use dockerd-rootless.sh to run the daemon.
+Use dockerd-rootless-setuptool.sh to set up systemd for dockerd-rootless.sh.
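+# Illustrative usage sketch (editorial note, not consumed by the build):
+# rough steps a user would follow with the scripts shipped in this subpackage;
+# exact steps vary by setup, so consult the upstream rootless documentation.
+#   $ dockerd-rootless-setuptool.sh install
+#   $ systemctl --user enable --now docker.service
+#   $ export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/docker.sock
+#   $ docker info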
+ +%package bash-completion +Summary: Bash Completion for %{name} +Group: System/Shells +Requires: %{name} = %{version} +Requires: bash-completion +Supplements: packageand(%{name}:bash-completion) +BuildArch: noarch + +%description bash-completion +Bash command line completion support for %{name}. + +%package zsh-completion +Summary: Zsh Completion for %{name} +Group: System/Shells +Requires: %{name} = %{version} +Requires: zsh +Supplements: packageand(%{name}:zsh) +BuildArch: noarch + +%description zsh-completion +Zsh command line completion support for %{name}. + +%package fish-completion +Summary: Fish completion for %{name} +Group: System/Shells +Requires: %{name} = %{version} +Requires: fish +Supplements: packageand(%{name}:fish) +BuildArch: noarch + +%description fish-completion +Fish command line completion support for %{name}. + +%prep +# docker-cli +%define cli_builddir %{_builddir}/%{name}-cli-%{version} +%setup -q -T -b 1 -n %{name}-cli-%{version} +[ "%{cli_builddir}" = "$PWD" ] +# offline manpages +%patch -P900 -p1 + +# docker +%define docker_builddir %{_builddir}/%{name}-%{version}_%{git_version} +%setup -q -n %{name}-%{version}_%{git_version} +[ "%{docker_builddir}" = "$PWD" ] +# README_SUSE.md for documentation. +cp %{SOURCE130} . + +%if 0%{?is_opensuse} == 0 +# PATCH-SUSE: Secrets patches. +%patch -P100 -p1 +%patch -P101 -p1 +%endif +%if 0%{?sle_version} == 120000 +# Patches to build on SLE-12. +%patch -P200 -p1 +%endif +# bsc#1099277 +%patch -P201 -p1 +# Solves apparmor issues on SLE-12, but okay for newer SLE versions too. +%patch -P202 -p1 +# bsc#1221916 +%patch -P203 -p1 +# bsc#1214855 +%patch -P204 -p1 + +%build +%sysusers_generate_pre %{SOURCE160} %{name} %{name}.conf + +BUILDTAGS="exclude_graphdriver_aufs apparmor selinux seccomp pkcs11" +%if 0%{?sle_version} == 120000 + # Allow us to build with older distros but still have deferred removal + # support at runtime. We only use this when building on SLE12, because + # later openSUSE/SLE versions have a new enough libdevicemapper to not + # require the runtime checking. + BUILDTAGS="libdm_dlsym_deferred_remove $BUILDTAGS" +%endif + +export AUTO_GOPATH=1 +# Make sure we always build PIC code. bsc#1048046 +export BUILDFLAGS="-buildmode=pie" +# Specify all of the versioning information. We use SOURCE_DATE_EPOCH if it's +# been injected by rpmbuild, otherwise we use the hardcoded git_commit_epoch +# generated above. 
boo#1064781
+export VERSION="%{nice_version}"
+export DOCKER_GITCOMMIT="%{git_version}"
+export GITCOMMIT="%{git_version}"
+export SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-%{git_commit_epoch}}"
+export BUILDTIME="$(date -u -d "@$SOURCE_DATE_EPOCH" --rfc-3339 ns 2>/dev/null | sed -e 's/ /T/')"
+
+###################
+## DOCKER ENGINE ##
+###################
+
+pushd "%{docker_builddir}"
+# use go modules for the build
+ln -s {vendor,go}.mod
+ln -s {vendor,go}.sum
+./hack/make.sh dynbinary
+popd
+
+###################
+## DOCKER CLIENT ##
+###################
+
+pushd "%{cli_builddir}"
+# use go modules for the build
+ln -s {vendor,go}.mod
+ln -s {vendor,go}.sum
+make DISABLE_WARN_OUTSIDE_CONTAINER=1 dynbinary manpages
+popd
+
+%install
+install -Dd -m0755 \
+ %{buildroot}%{_sysconfdir}/init.d \
+ %{buildroot}%{_bindir} \
+ %{buildroot}%{_sbindir}
+
+# docker daemon
+install -D -m0755 %{docker_builddir}/bundles/dynbinary-daemon/dockerd %{buildroot}/%{_bindir}/dockerd
+# docker proxy
+install -D -m0755 %{docker_builddir}/bundles/dynbinary-daemon/docker-proxy %{buildroot}/%{_bindir}/docker-proxy
+
+# cli-plugins/
+install -d %{buildroot}/usr/lib/docker/cli-plugins
+
+# /var/lib/docker
+install -d %{buildroot}/%{_localstatedir}/lib/docker
+# daemon.json config file
+install -D -m0644 %{SOURCE150} %{buildroot}%{_sysconfdir}/docker/daemon.json
+
+# docker cli
+install -D -m0755 %{cli_builddir}/build/docker %{buildroot}/%{_bindir}/docker
+install -D -m0644 %{cli_builddir}/contrib/completion/bash/docker "%{buildroot}%{_datarootdir}/bash-completion/completions/%{name}"
+install -D -m0644 %{cli_builddir}/contrib/completion/zsh/_docker "%{buildroot}%{_sysconfdir}/zsh_completion.d/_%{name}"
+install -D -m0644 %{cli_builddir}/contrib/completion/fish/docker.fish "%{buildroot}/%{_datadir}/fish/vendor_completions.d/%{name}.fish"
+
+# systemd service
+install -D -m0644 %{SOURCE100} %{buildroot}%{_unitdir}/%{name}.service
+install -D -m0644 %{SOURCE101} %{buildroot}%{_unitdir}/%{name}.socket
+ln -sf service %{buildroot}%{_sbindir}/rcdocker
+
+# udev rules that prevent dolphin from showing all docker devices (which slows
+# everything down); upstream report: https://bugs.kde.org/show_bug.cgi?id=329930
+install -D -m0644 %{SOURCE110} %{buildroot}%{_udevrulesdir}/80-%{name}.rules
+
+# audit rules
+install -D -m0640 %{SOURCE140} %{buildroot}%{_sysconfdir}/audit/rules.d/%{name}.rules
+
+# sysconfig file
+install -D -m0644 %{SOURCE120} %{buildroot}%{_fillupdir}/sysconfig.docker
+
+# install manpages (using the ones generated in the cli tree)
+install -d %{buildroot}%{_mandir}/man1
+install -p -m0644 %{cli_builddir}/man/man1/*.1 %{buildroot}%{_mandir}/man1
+install -d %{buildroot}%{_mandir}/man5
+install -p -m0644 %{cli_builddir}/man/man5/Dockerfile.5 %{buildroot}%{_mandir}/man5
+install -d %{buildroot}%{_mandir}/man8
+install -p -m0644 %{cli_builddir}/man/man8/*.8 %{buildroot}%{_mandir}/man8
+
+# sysusers.d
+install -D -m0644 %{SOURCE160} %{buildroot}%{_sysusersdir}/%{name}.conf
+
+# rootless extras
+install -D -p -m 0755 contrib/dockerd-rootless.sh %{buildroot}/%{_bindir}/dockerd-rootless.sh
+install -D -p -m 0755 contrib/dockerd-rootless-setuptool.sh %{buildroot}/%{_bindir}/dockerd-rootless-setuptool.sh
+
+%fdupes %{buildroot}
+
+%pre -f %{name}.pre
+# /etc/sub[ug]id should exist already (it's part of shadow-utils), but older
+# distros don't have it. Docker just parses it and doesn't need any special
+# shadow-utils helpers.
+touch /etc/subuid /etc/subgid ||:
+
+# "useradd -r" doesn't add sub[ug]ids so we manually add some. Hopefully there
+# aren't any conflicts here, because usermod doesn't provide the same "get
+# unused range" feature that dockremap does.
+grep -q '^dockremap:' /etc/subuid || \
+ usermod -v 100000000-200000000 dockremap &>/dev/null || \
+ echo "dockremap:100000000:100000001" >>/etc/subuid ||:
+grep -q '^dockremap:' /etc/subgid || \
+ usermod -w 100000000-200000000 dockremap &>/dev/null || \
+ echo "dockremap:100000000:100000001" >>/etc/subgid ||:
+
+%service_add_pre %{name}.service %{name}.socket
+
+%post
+%service_add_post %{name}.service %{name}.socket
+%{fillup_only -n docker}
+
+%preun
+%service_del_preun %{name}.service %{name}.socket
+
+%postun
+%service_del_postun %{name}.service %{name}.socket
+
+%files
+%defattr(-,root,root)
+%doc README.md README_SUSE.md
+%license LICENSE
+%{_bindir}/docker
+%{_bindir}/dockerd
+%{_bindir}/docker-proxy
+%{_sbindir}/rcdocker
+%dir %{_localstatedir}/lib/docker/
+
+%dir /usr/lib/docker
+%dir /usr/lib/docker/cli-plugins
+
+%{_unitdir}/%{name}.service
+%{_unitdir}/%{name}.socket
+%{_sysusersdir}/%{name}.conf
+
+%dir %{_sysconfdir}/docker
+%config(noreplace) %{_sysconfdir}/docker/daemon.json
+%{_fillupdir}/sysconfig.docker
+
+%config %{_sysconfdir}/audit/rules.d/%{name}.rules
+%{_udevrulesdir}/80-%{name}.rules
+
+%{_mandir}/man1/docker-*.1%{ext_man}
+%{_mandir}/man1/docker.1%{ext_man}
+%{_mandir}/man5/Dockerfile.5%{ext_man}
+%{_mandir}/man8/dockerd.8%{ext_man}
+
+%files bash-completion
+%defattr(-,root,root)
+%{_datarootdir}/bash-completion/completions/%{name}
+
+%files zsh-completion
+%defattr(-,root,root)
+%{_sysconfdir}/zsh_completion.d/_%{name}
+
+%files fish-completion
+%defattr(-,root,root)
+%{_datadir}/fish/vendor_completions.d/%{name}.fish
+
+%files rootless-extras
+%defattr(-,root,root)
+%{_bindir}/dockerd-rootless.sh
+%{_bindir}/dockerd-rootless-setuptool.sh
+
+%changelog
diff --git a/docker.sysusers b/docker.sysusers
new file mode 100644
index 0000000..82b8e64
--- /dev/null
+++ b/docker.sysusers
@@ -0,0 +1,3 @@
+#Type Name ID GECOS Home directory Shell
+g docker - - - -
+u dockremap - 'docker --userns-remap=default' - -
diff --git a/sysconfig.docker b/sysconfig.docker
new file mode 100644
index 0000000..f089e52
--- /dev/null
+++ b/sysconfig.docker
@@ -0,0 +1,8 @@
+
+## Path : System/Management
+## Description : Extra CLI switches for the docker daemon
+## Type : string
+## Default : ""
+## ServiceRestart : docker
+#
+DOCKER_OPTS=""
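+#
+# Illustrative example (editorial note; the flags below are standard dockerd
+# options, but verify them against 'dockerd --help' for the packaged version).
+# docker.service reads this file via EnvironmentFile= and appends $DOCKER_OPTS
+# to the dockerd command line, e.g.:
+#
+# DOCKER_OPTS="--log-level=warn --data-root=/srv/docker"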