6a719b3954
sysconfig a long time ago, and apparently this causes issues with systemd in some cases. OBS-URL: https://build.opensuse.org/package/show/Virtualization:containers/docker?expand=0&rev=416
From 3701285f1cf678dda730e3f9a4860d74ca54057d Mon Sep 17 00:00:00 2001
From: danishprakash <danish.prakash@suse.com>
Date: Mon, 12 Feb 2024 18:07:06 +0530
Subject: [PATCH] docs: include required tools in source tree

In order to be able to build the documentation without internet access
(as is required by some distribution build systems), all of the source
code needed for the build needs to be available in the source tarball.

This used to be possible with the docker-cli sources but was
accidentally broken with some CI changes that switched to downloading
the tools (by modifying go.mod as part of the docs build script).

This pattern also made documentation builds less reproducible, since the
tool version used was not based on the source code version.

Fixes: commit 7dc35c03fca5 ("validate manpages target")
Fixes: commit a650f4ddd008 ("switch to cli-docs-tool for yaml docs generation")
Signed-off-by: Aleksa Sarai <asarai@suse.de>
---
 docs/generate/go.mod | 13 -
 docs/generate/tools.go | 8 -
 import.go | 17 +
 man/tools.go | 11 -
 scripts/docs/generate-man.sh | 33 +-
 scripts/docs/generate-md.sh | 36 +-
 scripts/docs/generate-yaml.sh | 29 +-
 vendor.mod | 5 +
 vendor.sum | 4 +
 .../cpuguy83/go-md2man/v2/.gitignore | 2 +
 .../cpuguy83/go-md2man/v2/.golangci.yml | 6 +
 .../cpuguy83/go-md2man/v2/Dockerfile | 20 +
 .../cpuguy83/go-md2man/v2/LICENSE.md | 21 +
 .../github.com/cpuguy83/go-md2man/v2/Makefile | 35 +
 .../cpuguy83/go-md2man/v2/README.md | 15 +
 .../cpuguy83/go-md2man/v2/go-md2man.1.md | 28 +
 .../cpuguy83/go-md2man/v2/md2man.go | 53 +
 .../cpuguy83/go-md2man/v2/md2man/md2man.go | 16 +
 .../cpuguy83/go-md2man/v2/md2man/roff.go | 348 ++
 .../docker/cli-docs-tool/.dockerignore | 2 +
 .../docker/cli-docs-tool/.gitignore | 2 +
 .../docker/cli-docs-tool/.golangci.yml | 37 +
 .../docker/cli-docs-tool/Dockerfile | 86 +
 .../github.com/docker/cli-docs-tool/LICENSE | 202 ++
 .../github.com/docker/cli-docs-tool/README.md | 67 +
 .../cli-docs-tool/annotation/annotation.go | 25 +
 .../docker/cli-docs-tool/clidocstool.go | 123 +
 .../docker/cli-docs-tool/clidocstool_md.go | 280 ++
 .../docker/cli-docs-tool/clidocstool_yaml.go | 435 +++
 .../docker/cli-docs-tool/docker-bake.hcl | 51 +
 .../docker/cli-docs-tool/markdown.go | 87 +
 .../russross/blackfriday/v2/.gitignore | 8 +
 .../russross/blackfriday/v2/.travis.yml | 17 +
 .../russross/blackfriday/v2/LICENSE.txt | 29 +
 .../russross/blackfriday/v2/README.md | 335 ++
 .../russross/blackfriday/v2/block.go | 1612 +++++++++
 .../github.com/russross/blackfriday/v2/doc.go | 46 +
 .../russross/blackfriday/v2/entities.go | 2236 ++++++++++++
 .../github.com/russross/blackfriday/v2/esc.go | 70 +
 .../russross/blackfriday/v2/html.go | 952 ++++++
 .../russross/blackfriday/v2/inline.go | 1228 +++++++
 .../russross/blackfriday/v2/markdown.go | 950 ++++++
 .../russross/blackfriday/v2/node.go | 360 ++
 .../russross/blackfriday/v2/smartypants.go | 457 +++
 vendor/github.com/spf13/cobra/doc/man_docs.go | 246 ++
 vendor/github.com/spf13/cobra/doc/md_docs.go | 158 +
 .../github.com/spf13/cobra/doc/rest_docs.go | 186 +
 vendor/github.com/spf13/cobra/doc/util.go | 52 +
 .../github.com/spf13/cobra/doc/yaml_docs.go | 175 +
 vendor/gopkg.in/yaml.v3/LICENSE | 50 +
 vendor/gopkg.in/yaml.v3/NOTICE | 13 +
 vendor/gopkg.in/yaml.v3/README.md | 150 +
 vendor/gopkg.in/yaml.v3/apic.go | 747 ++++
 vendor/gopkg.in/yaml.v3/decode.go | 1000 ++++++
 vendor/gopkg.in/yaml.v3/emitterc.go | 2020 +++++++++++
 vendor/gopkg.in/yaml.v3/encode.go | 577 ++++
 vendor/gopkg.in/yaml.v3/parserc.go | 1258 +++++++
 vendor/gopkg.in/yaml.v3/readerc.go | 434 +++
 vendor/gopkg.in/yaml.v3/resolve.go | 326 ++
 vendor/gopkg.in/yaml.v3/scannerc.go | 3038 +++++++++++++++++
 vendor/gopkg.in/yaml.v3/sorter.go | 134 +
 vendor/gopkg.in/yaml.v3/writerc.go | 48 +
 vendor/gopkg.in/yaml.v3/yaml.go | 698 ++++
 vendor/gopkg.in/yaml.v3/yamlh.go | 807 +++++
 vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 ++
 vendor/modules.txt | 15 +
 66 files changed, 22631 insertions(+), 96 deletions(-)
 delete mode 100644 docs/generate/go.mod
 delete mode 100644 docs/generate/tools.go
 create mode 100644 import.go
 delete mode 100644 man/tools.go
 create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/.gitignore
 create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/.golangci.yml
 create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile
 create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
 create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/Makefile
 create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/README.md
 create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md
 create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man.go
 create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
 create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
 create mode 100644 vendor/github.com/docker/cli-docs-tool/.dockerignore
 create mode 100644 vendor/github.com/docker/cli-docs-tool/.gitignore
 create mode 100644 vendor/github.com/docker/cli-docs-tool/.golangci.yml
 create mode 100644 vendor/github.com/docker/cli-docs-tool/Dockerfile
 create mode 100644 vendor/github.com/docker/cli-docs-tool/LICENSE
 create mode 100644 vendor/github.com/docker/cli-docs-tool/README.md
 create mode 100644 vendor/github.com/docker/cli-docs-tool/annotation/annotation.go
 create mode 100644 vendor/github.com/docker/cli-docs-tool/clidocstool.go
 create mode 100644 vendor/github.com/docker/cli-docs-tool/clidocstool_md.go
 create mode 100644 vendor/github.com/docker/cli-docs-tool/clidocstool_yaml.go
 create mode 100644 vendor/github.com/docker/cli-docs-tool/docker-bake.hcl
 create mode 100644 vendor/github.com/docker/cli-docs-tool/markdown.go
 create mode 100644 vendor/github.com/russross/blackfriday/v2/.gitignore
 create mode 100644 vendor/github.com/russross/blackfriday/v2/.travis.yml
 create mode 100644 vendor/github.com/russross/blackfriday/v2/LICENSE.txt
 create mode 100644 vendor/github.com/russross/blackfriday/v2/README.md
 create mode 100644 vendor/github.com/russross/blackfriday/v2/block.go
 create mode 100644 vendor/github.com/russross/blackfriday/v2/doc.go
 create mode 100644 vendor/github.com/russross/blackfriday/v2/entities.go
 create mode 100644 vendor/github.com/russross/blackfriday/v2/esc.go
 create mode 100644 vendor/github.com/russross/blackfriday/v2/html.go
 create mode 100644 vendor/github.com/russross/blackfriday/v2/inline.go
 create mode 100644 vendor/github.com/russross/blackfriday/v2/markdown.go
 create mode 100644 vendor/github.com/russross/blackfriday/v2/node.go
 create mode 100644 vendor/github.com/russross/blackfriday/v2/smartypants.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/man_docs.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/md_docs.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/rest_docs.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/util.go
 create mode 100644 vendor/github.com/spf13/cobra/doc/yaml_docs.go
 create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE
 create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE
 create mode 100644 vendor/gopkg.in/yaml.v3/README.md
 create mode 100644 vendor/gopkg.in/yaml.v3/apic.go
 create mode 100644 vendor/gopkg.in/yaml.v3/decode.go
 create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go
 create mode 100644 vendor/gopkg.in/yaml.v3/encode.go
 create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go
 create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go
 create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go
 create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go
 create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go
 create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go
 create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go
 create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go
 create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go

diff --git a/docs/generate/go.mod b/docs/generate/go.mod
deleted file mode 100644
index d62ff455713a..000000000000
--- a/docs/generate/go.mod
+++ /dev/null
@@ -1,13 +0,0 @@
-module github.com/docker/cli/docs/generate
-
-// dummy go.mod to avoid dealing with dependencies specific
-// to docs generation and not really part of the project.
-
-go 1.16
-
-//require (
-// github.com/docker/cli v0.0.0+incompatible
-// github.com/docker/cli-docs-tool v0.5.0
-//)
-//
-//replace github.com/docker/cli v0.0.0+incompatible => ../../
diff --git a/docs/generate/tools.go b/docs/generate/tools.go
deleted file mode 100644
index 47510bc49a89..000000000000
--- a/docs/generate/tools.go
+++ /dev/null
@@ -1,8 +0,0 @@
-//go:build tools
-// +build tools
-
-package main
-
-import (
- _ "github.com/docker/cli-docs-tool"
-)
diff --git a/import.go b/import.go
new file mode 100644
index 000000000000..662a6055146c
--- /dev/null
+++ b/import.go
@@ -0,0 +1,17 @@
+// This is only used to define imports we need for doc generation.
+
+//go:build never
+// +build never
+
+package cli
+
+import (
+ // Used for md and yaml doc generation.
+ _ "github.com/docker/cli-docs-tool"
+
+ // Used for man page generation.
+ _ "github.com/cpuguy83/go-md2man/v2"
+ _ "github.com/spf13/cobra"
+ _ "github.com/spf13/cobra/doc"
+ _ "github.com/spf13/pflag"
+)
diff --git a/man/tools.go b/man/tools.go
deleted file mode 100644
index 3cafe6533aff..000000000000
--- a/man/tools.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build tools
-// +build tools
-
-package main
-
-import (
- _ "github.com/cpuguy83/go-md2man/v2"
- _ "github.com/spf13/cobra"
- _ "github.com/spf13/cobra/doc"
- _ "github.com/spf13/pflag"
-)
diff --git a/scripts/docs/generate-man.sh b/scripts/docs/generate-man.sh
index 12a4b81199db..1e12a95e9c9a 100755
--- a/scripts/docs/generate-man.sh
+++ b/scripts/docs/generate-man.sh
@@ -1,35 +1,22 @@
#!/usr/bin/env bash

-set -eu
-
-: "${MD2MAN_VERSION=v2.0.3}"
+set -Eeuo pipefail

export GO111MODULE=auto

-function clean {
- rm -rf "$buildir"
+# temporary "go.mod" to make -modfile= work
+touch go.mod
+
+function clean() {
+ rm -f "$(pwd)/go.mod"
}

-buildir=$(mktemp -d -t docker-cli-docsgen.XXXXXXXXXX)
trap clean EXIT

-(
- set -x
- cp -r . "$buildir/"
- cd "$buildir"
- # init dummy go.mod
- ./scripts/vendor init
- # install go-md2man and copy man/tools.go in root folder
- # to be able to fetch the required dependencies
- go mod edit -modfile=vendor.mod -require=github.com/cpuguy83/go-md2man/v2@${MD2MAN_VERSION}
- cp man/tools.go .
- # update vendor
- ./scripts/vendor update
- # build gen-manpages
- go build -mod=vendor -modfile=vendor.mod -tags manpages -o /tmp/gen-manpages ./man/generate.go
- # build go-md2man
- go build -mod=vendor -modfile=vendor.mod -o /tmp/go-md2man ./vendor/github.com/cpuguy83/go-md2man/v2
-)
+# build gen-manpages
+go build -mod=vendor -modfile=vendor.mod -tags manpages -o /tmp/gen-manpages ./man/generate.go
+# build go-md2man
+go build -mod=vendor -modfile=vendor.mod -o /tmp/go-md2man ./vendor/github.com/cpuguy83/go-md2man/v2

mkdir -p man/man1
(set -x ; /tmp/gen-manpages --root "." --target "$(pwd)/man/man1")
diff --git a/scripts/docs/generate-md.sh b/scripts/docs/generate-md.sh
index 4caa01eaed23..0af86843bbe4 100755
--- a/scripts/docs/generate-md.sh
+++ b/scripts/docs/generate-md.sh
@@ -1,33 +1,29 @@
#!/usr/bin/env bash

-set -eu
-
-: "${CLI_DOCS_TOOL_VERSION=v0.7.0}"
+set -Eeuo pipefail

export GO111MODULE=auto

+# temporary "go.mod" to make -modfile= work
+touch go.mod
+
function clean {
- rm -rf "$buildir"
+ rm -f "$(pwd)/go.mod"
+ if [ -f "$(pwd)/docs/reference/commandline/docker.md" ]; then
+ mv "$(pwd)/docs/reference/commandline/docker.md" "$(pwd)/docs/reference/commandline/cli.md"
+ fi
}

-buildir=$(mktemp -d -t docker-cli-docsgen.XXXXXXXXXX)
trap clean EXIT

-(
- set -x
- cp -r . "$buildir/"
- cd "$buildir"
- # init dummy go.mod
- ./scripts/vendor init
- # install cli-docs-tool and copy docs/tools.go in root folder
- # to be able to fetch the required depedencies
- go mod edit -modfile=vendor.mod -require=github.com/docker/cli-docs-tool@${CLI_DOCS_TOOL_VERSION}
- cp docs/generate/tools.go .
- # update vendor
- ./scripts/vendor update
- # build docsgen
- go build -mod=vendor -modfile=vendor.mod -tags docsgen -o /tmp/docsgen ./docs/generate/generate.go
-)
+# build docsgen
+go build -mod=vendor -modfile=vendor.mod -tags docsgen -o /tmp/docsgen ./docs/generate/generate.go
+
+# yaml generation on docs repo needs the cli.md file: https://github.com/docker/cli/pull/3924#discussion_r1059986605
+# but markdown generation docker.md atm. While waiting for a fix in cli-docs-tool
+# we need to first move the cli.md file to docker.md, do the generation and
+# then move it back in trap handler.
+mv "$(pwd)/docs/reference/commandline/cli.md" "$(pwd)/docs/reference/commandline/docker.md"

(
set -x
diff --git a/scripts/docs/generate-yaml.sh b/scripts/docs/generate-yaml.sh
index 0d67c5e5bb09..7d98e161df5d 100755
--- a/scripts/docs/generate-yaml.sh
+++ b/scripts/docs/generate-yaml.sh
@@ -1,33 +1,20 @@
#!/usr/bin/env bash

-set -eu
-
-: "${CLI_DOCS_TOOL_VERSION=v0.7.0}"
+set -Eeuo pipefail

export GO111MODULE=auto

-function clean {
- rm -rf "$buildir"
+# temporary "go.mod" to make -modfile= work
+touch go.mod
+
+function clean() {
+ rm -f "$(pwd)/go.mod"
}

-buildir=$(mktemp -d -t docker-cli-docsgen.XXXXXXXXXX)
trap clean EXIT

-(
- set -x
- cp -r . "$buildir/"
- cd "$buildir"
- # init dummy go.mod
- ./scripts/vendor init
- # install cli-docs-tool and copy docs/tools.go in root folder
- # to be able to fetch the required depedencies
- go mod edit -modfile=vendor.mod -require=github.com/docker/cli-docs-tool@${CLI_DOCS_TOOL_VERSION}
- cp docs/generate/tools.go .
- # update vendor
- ./scripts/vendor update
- # build docsgen
- go build -mod=vendor -modfile=vendor.mod -tags docsgen -o /tmp/docsgen ./docs/generate/generate.go
-)
+# build docsgen
+go build -mod=vendor -modfile=vendor.mod -tags docsgen -o /tmp/docsgen ./docs/generate/generate.go

mkdir -p docs/yaml
set -x
diff --git a/vendor.mod b/vendor.mod
index 3bc5ce327f0f..a654f78703d6 100644
--- a/vendor.mod
+++ b/vendor.mod
@@ -11,6 +11,7 @@ require (
github.com/containerd/platforms v0.2.0
github.com/creack/pty v1.1.21
github.com/distribution/reference v0.5.0
+ github.com/docker/cli-docs-tool v0.6.0
github.com/docker/distribution v2.8.3+incompatible
github.com/docker/docker v26.1.4-0.20240605103321-de5c9cf0b96e+incompatible // 26.1 branch (v26.1.4-dev)
github.com/docker/docker-credential-helpers v0.8.1
@@ -53,6 +54,8 @@ require (
tags.cncf.io/container-device-interface v0.7.2
)

+require github.com/cpuguy83/go-md2man/v2 v2.0.3
+
require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
@@ -83,6 +86,7 @@ require (
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
+ github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
@@ -96,4 +100,5 @@ require (
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect
google.golang.org/grpc v1.60.1 // indirect
google.golang.org/protobuf v1.33.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/vendor.sum b/vendor.sum
index 6a31c9b2cf62..a0905e657c37 100644
--- a/vendor.sum
+++ b/vendor.sum
@@ -46,6 +46,7 @@ github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3
github.com/containerd/platforms v0.2.0 h1:clGNvVIcY3k39VJSYdFGohI1b3bP/eeBUVR5+XA28oo=
github.com/containerd/platforms v0.2.0/go.mod h1:XOM2BS6kN6gXafPLg80V6y/QUib+xoLyC3qVmHzibko=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=
@@ -56,6 +57,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/cli-docs-tool v0.6.0 h1:Z9x10SaZgFaB6jHgz3OWooynhSa40CsWkpe5hEnG/qA=
+github.com/docker/cli-docs-tool v0.6.0/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbIwTusYpzCXS/o=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@@ -241,6 +244,7 @@ github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/.gitignore b/vendor/github.com/cpuguy83/go-md2man/v2/.gitignore
new file mode 100644
index 000000000000..30f97c3d73ab
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/.gitignore
@@ -0,0 +1,2 @@
+go-md2man
+bin
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/.golangci.yml b/vendor/github.com/cpuguy83/go-md2man/v2/.golangci.yml
new file mode 100644
index 000000000000..71f073f3c6b9
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/.golangci.yml
@@ -0,0 +1,6 @@
+# For documentation, see https://golangci-lint.run/usage/configuration/
+
+linters:
+ enable:
+ - gofumpt
+
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile b/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile
new file mode 100644
index 000000000000..7181c5306f41
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile
@@ -0,0 +1,20 @@
+ARG GO_VERSION=1.18
+ARG GO_IMAGE=golang:${GO_VERSION}
+
+FROM --platform=$BUILDPLATFORM $GO_IMAGE AS build
+COPY . /go/src/github.com/cpuguy83/go-md2man
+WORKDIR /go/src/github.com/cpuguy83/go-md2man
+ARG TARGETOS
+ARG TARGETARCH
+ARG TARGETVARIANT
+RUN \
+ export GOOS="${TARGETOS}"; \
+ export GOARCH="${TARGETARCH}"; \
+ if [ "${TARGETARCH}" = "arm" ] && [ "${TARGETVARIANT}" ]; then \
+ export GOARM="${TARGETVARIANT#v}"; \
+ fi; \
+ CGO_ENABLED=0 go build
+
+FROM scratch
+COPY --from=build /go/src/github.com/cpuguy83/go-md2man/go-md2man /go-md2man
+ENTRYPOINT ["/go-md2man"]
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
new file mode 100644
index 000000000000..1cade6cef6a1
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Brian Goff
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/Makefile b/vendor/github.com/cpuguy83/go-md2man/v2/Makefile
new file mode 100644
index 000000000000..437fc9997926
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/Makefile
@@ -0,0 +1,35 @@
+GO111MODULE ?= on
+LINTER_BIN ?= golangci-lint
+
+export GO111MODULE
+
+.PHONY:
+build: bin/go-md2man
+
+.PHONY: clean
+clean:
+ @rm -rf bin/*
+
+.PHONY: test
+test:
+ @go test $(TEST_FLAGS) ./...
+
+bin/go-md2man: actual_build_flags := $(BUILD_FLAGS) -o bin/go-md2man
+bin/go-md2man: bin
+ @CGO_ENABLED=0 go build $(actual_build_flags)
+
+bin:
+ @mkdir ./bin
+
+.PHONY: mod
+mod:
+ @go mod tidy
+
+.PHONY: check-mod
+check-mod: # verifies that module changes for go.mod and go.sum are checked in
+ @hack/ci/check_mods.sh
+
+.PHONY: vendor
+vendor: mod
+ @go mod vendor -v
+
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/README.md b/vendor/github.com/cpuguy83/go-md2man/v2/README.md
new file mode 100644
index 000000000000..0e30d341483c
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/README.md
@@ -0,0 +1,15 @@
+go-md2man
+=========
+
+Converts markdown into roff (man pages).
+
+Uses blackfriday to process markdown into man pages.
+
+### Usage
+
+./md2man -in /path/to/markdownfile.md -out /manfile/output/path
+
+### How to contribute
+
+We use go modules to manage dependencies.
+As such you must be using at lest go1.11.
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md b/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md
new file mode 100644
index 000000000000..aa4587e279ff
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md
@@ -0,0 +1,28 @@
+go-md2man 1 "January 2015" go-md2man "User Manual"
+==================================================
+
+# NAME
+go-md2man - Convert markdown files into manpages
+
+# SYNOPSIS
+**go-md2man** [**-in**=*/path/to/md/file*] [**-out**=*/path/to/output*]
+
+# DESCRIPTION
+**go-md2man** converts standard markdown formatted documents into manpages. It is
+written purely in Go so as to reduce dependencies on 3rd party libs.
+
+By default, the input is stdin and the output is stdout.
+
+# EXAMPLES
+Convert the markdown file *go-md2man.1.md* into a manpage:
+```
+go-md2man < go-md2man.1.md > go-md2man.1
+```
+
+Same, but using command line arguments instead of shell redirection:
+```
+go-md2man -in=go-md2man.1.md -out=go-md2man.1
+```
+
+# HISTORY
+January 2015, Originally compiled by Brian Goff (cpuguy83@gmail.com).
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man.go
new file mode 100644
index 000000000000..4ff873b8e767
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man.go
@@ -0,0 +1,53 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/cpuguy83/go-md2man/v2/md2man"
+)
+
+var (
+ inFilePath = flag.String("in", "", "Path to file to be processed (default: stdin)")
+ outFilePath = flag.String("out", "", "Path to output processed file (default: stdout)")
+)
+
+func main() {
+ var err error
+ flag.Parse()
+
+ inFile := os.Stdin
+ if *inFilePath != "" {
+ inFile, err = os.Open(*inFilePath)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ }
+ defer inFile.Close() // nolint: errcheck
+
+ doc, err := ioutil.ReadAll(inFile)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+
+ out := md2man.Render(doc)
+
+ outFile := os.Stdout
+ if *outFilePath != "" {
+ outFile, err = os.Create(*outFilePath)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ defer outFile.Close() // nolint: errcheck
+ }
+ _, err = outFile.Write(out)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
new file mode 100644
index 000000000000..42bf32aab003
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
@@ -0,0 +1,16 @@
+package md2man
+
+import (
+ "github.com/russross/blackfriday/v2"
+)
+
+// Render converts a markdown document into a roff formatted document.
+func Render(doc []byte) []byte {
+ renderer := NewRoffRenderer()
+
+ return blackfriday.Run(doc,
+ []blackfriday.Option{
+ blackfriday.WithRenderer(renderer),
+ blackfriday.WithExtensions(renderer.GetExtensions()),
+ }...)
+}
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
new file mode 100644
index 000000000000..4b19188d90fd
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
@@ -0,0 +1,348 @@
+package md2man
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/russross/blackfriday/v2"
+)
+
+// roffRenderer implements the blackfriday.Renderer interface for creating
+// roff format (manpages) from markdown text
+type roffRenderer struct {
+ extensions blackfriday.Extensions
+ listCounters []int
+ firstHeader bool
+ firstDD bool
+ listDepth int
+}
+
+const (
+ titleHeader = ".TH "
+ topLevelHeader = "\n\n.SH "
+ secondLevelHdr = "\n.SH "
+ otherHeader = "\n.SS "
+ crTag = "\n"
+ emphTag = "\\fI"
+ emphCloseTag = "\\fP"
+ strongTag = "\\fB"
+ strongCloseTag = "\\fP"
+ breakTag = "\n.br\n"
+ paraTag = "\n.PP\n"
+ hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
+ linkTag = "\n\\[la]"
+ linkCloseTag = "\\[ra]"
+ codespanTag = "\\fB"
+ codespanCloseTag = "\\fR"
+ codeTag = "\n.EX\n"
+ codeCloseTag = "\n.EE\n"
+ quoteTag = "\n.PP\n.RS\n"
+ quoteCloseTag = "\n.RE\n"
+ listTag = "\n.RS\n"
+ listCloseTag = "\n.RE\n"
+ dtTag = "\n.TP\n"
+ dd2Tag = "\n"
+ tableStart = "\n.TS\nallbox;\n"
+ tableEnd = ".TE\n"
+ tableCellStart = "T{\n"
+ tableCellEnd = "\nT}\n"
+)
+
+// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
+// from markdown
+func NewRoffRenderer() *roffRenderer { // nolint: golint
+ var extensions blackfriday.Extensions
+
+ extensions |= blackfriday.NoIntraEmphasis
+ extensions |= blackfriday.Tables
+ extensions |= blackfriday.FencedCode
+ extensions |= blackfriday.SpaceHeadings
+ extensions |= blackfriday.Footnotes
+ extensions |= blackfriday.Titleblock
+ extensions |= blackfriday.DefinitionLists
+ return &roffRenderer{
+ extensions: extensions,
+ }
+}
+
+// GetExtensions returns the list of extensions used by this renderer implementation
+func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
+ return r.extensions
+}
+
+// RenderHeader handles outputting the header at document start
+func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
+ // disable hyphenation
+ out(w, ".nh\n")
+}
+
+// RenderFooter handles outputting the footer at the document end; the roff
+// renderer has no footer information
+func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
+}
+
+// RenderNode is called for each node in a markdown document; based on the node
+// type the equivalent roff output is sent to the writer
+func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
+ walkAction := blackfriday.GoToNext
+
+ switch node.Type {
+ case blackfriday.Text:
+ escapeSpecialChars(w, node.Literal)
+ case blackfriday.Softbreak:
+ out(w, crTag)
+ case blackfriday.Hardbreak:
+ out(w, breakTag)
+ case blackfriday.Emph:
+ if entering {
+ out(w, emphTag)
+ } else {
+ out(w, emphCloseTag)
+ }
+ case blackfriday.Strong:
+ if entering {
+ out(w, strongTag)
+ } else {
+ out(w, strongCloseTag)
+ }
+ case blackfriday.Link:
+ // Don't render the link text for automatic links, because this
+ // will only duplicate the URL in the roff output.
+ // See https://daringfireball.net/projects/markdown/syntax#autolink
+ if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) {
+ out(w, string(node.FirstChild.Literal))
+ }
+ // Hyphens in a link must be escaped to avoid word-wrap in the rendered man page.
+ escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-")
+ out(w, linkTag+escapedLink+linkCloseTag)
+ walkAction = blackfriday.SkipChildren
+ case blackfriday.Image:
+ // ignore images
+ walkAction = blackfriday.SkipChildren
+ case blackfriday.Code:
+ out(w, codespanTag)
+ escapeSpecialChars(w, node.Literal)
+ out(w, codespanCloseTag)
+ case blackfriday.Document:
+ break
+ case blackfriday.Paragraph:
+ // roff .PP markers break lists
+ if r.listDepth > 0 {
+ return blackfriday.GoToNext
+ }
+ if entering {
+ out(w, paraTag)
+ } else {
+ out(w, crTag)
+ }
+ case blackfriday.BlockQuote:
+ if entering {
+ out(w, quoteTag)
+ } else {
+ out(w, quoteCloseTag)
+ }
+ case blackfriday.Heading:
+ r.handleHeading(w, node, entering)
+ case blackfriday.HorizontalRule:
+ out(w, hruleTag)
+ case blackfriday.List:
+ r.handleList(w, node, entering)
+ case blackfriday.Item:
+ r.handleItem(w, node, entering)
+ case blackfriday.CodeBlock:
+ out(w, codeTag)
+ escapeSpecialChars(w, node.Literal)
+ out(w, codeCloseTag)
+ case blackfriday.Table:
+ r.handleTable(w, node, entering)
+ case blackfriday.TableHead:
+ case blackfriday.TableBody:
+ case blackfriday.TableRow:
+ // no action as cell entries do all the nroff formatting
+ return blackfriday.GoToNext
+ case blackfriday.TableCell:
+ r.handleTableCell(w, node, entering)
+ case blackfriday.HTMLSpan:
+ // ignore other HTML tags
+ case blackfriday.HTMLBlock:
+ if bytes.HasPrefix(node.Literal, []byte("<!--")) {
+ break // ignore comments, no warning
+ }
+ fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
+ default:
+ fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
+ }
+ return walkAction
+}
+
+func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
+ if entering {
+ switch node.Level {
+ case 1:
+ if !r.firstHeader {
+ out(w, titleHeader)
+ r.firstHeader = true
+ break
+ }
+ out(w, topLevelHeader)
+ case 2:
+ out(w, secondLevelHdr)
+ default:
+ out(w, otherHeader)
+ }
+ }
+}
+
+func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) {
+ openTag := listTag
+ closeTag := listCloseTag
+ if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
+ // tags for definition lists handled within Item node
+ openTag = ""
+ closeTag = ""
+ }
+ if entering {
+ r.listDepth++
+ if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
+ r.listCounters = append(r.listCounters, 1)
+ }
+ out(w, openTag)
+ } else {
+ if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
+ r.listCounters = r.listCounters[:len(r.listCounters)-1]
+ }
+ out(w, closeTag)
+ r.listDepth--
+ }
+}
+
+func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) {
+ if entering {
+ if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
+ out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
+ r.listCounters[len(r.listCounters)-1]++
+ } else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
+ // DT (definition term): line just before DD (see below).
+ out(w, dtTag)
+ r.firstDD = true
+ } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
+ // DD (definition description): line that starts with ": ".
+ //
+ // We have to distinguish between the first DD and the
+ // subsequent ones, as there should be no vertical
+ // whitespace between the DT and the first DD.
+ if r.firstDD {
+ r.firstDD = false
+ } else {
+ out(w, dd2Tag)
+ }
+ } else {
+ out(w, ".IP \\(bu 2\n")
+ }
+ } else {
+ out(w, "\n")
+ }
+}
+
+func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
+ if entering {
+ out(w, tableStart)
+ // call walker to count cells (and rows?) so format section can be produced
+ columns := countColumns(node)
+ out(w, strings.Repeat("l ", columns)+"\n")
+ out(w, strings.Repeat("l ", columns)+".\n")
+ } else {
+ out(w, tableEnd)
+ }
+}
+
+func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
+ if entering {
+ var start string
+ if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
+ start = "\t"
+ }
+ if node.IsHeader {
+ start += strongTag
+ } else if nodeLiteralSize(node) > 30 {
+ start += tableCellStart
+ }
+ out(w, start)
+ } else {
+ var end string
+ if node.IsHeader {
+ end = strongCloseTag
+ } else if nodeLiteralSize(node) > 30 {
+ end = tableCellEnd
+ }
+ if node.Next == nil && end != tableCellEnd {
+ // Last cell: need to carriage return if we are at the end of the
+ // header row and content isn't wrapped in a "tablecell"
+ end += crTag
+ }
+ out(w, end)
+ }
+}
+
+func nodeLiteralSize(node *blackfriday.Node) int {
+ total := 0
+ for n := node.FirstChild; n != nil; n = n.FirstChild {
+ total += len(n.Literal)
+ }
+ return total
+}
+
+// because roff format requires knowing the column count before outputting any table
+// data we need to walk a table tree and count the columns
+func countColumns(node *blackfriday.Node) int {
+ var columns int
+
+ node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
+ switch node.Type {
+ case blackfriday.TableRow:
+ if !entering {
+ return blackfriday.Terminate
+ }
+ case blackfriday.TableCell:
+ if entering {
+ columns++
+ }
+ default:
+ }
+ return blackfriday.GoToNext
+ })
+ return columns
+}
+
+func out(w io.Writer, output string) {
+ io.WriteString(w, output) // nolint: errcheck
+}
+
+func escapeSpecialChars(w io.Writer, text []byte) {
+ for i := 0; i < len(text); i++ {
+ // escape initial apostrophe or period
+ if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
+ out(w, "\\&")
+ }
+
+ // directly copy normal characters
+ org := i
+
+ for i < len(text) && text[i] != '\\' {
+ i++
+ }
+ if i > org {
+ w.Write(text[org:i]) // nolint: errcheck
+ }
+
+ // escape a character
+ if i >= len(text) {
+ break
+ }
+
+ w.Write([]byte{'\\', text[i]}) // nolint: errcheck
+ }
+}
diff --git a/vendor/github.com/docker/cli-docs-tool/.dockerignore b/vendor/github.com/docker/cli-docs-tool/.dockerignore
new file mode 100644
index 000000000000..c8c323c89663
--- /dev/null
+++ b/vendor/github.com/docker/cli-docs-tool/.dockerignore
@@ -0,0 +1,2 @@
+/coverage.txt
+/example/docs
diff --git a/vendor/github.com/docker/cli-docs-tool/.gitignore b/vendor/github.com/docker/cli-docs-tool/.gitignore
new file mode 100644
index 000000000000..c8c323c89663
--- /dev/null
+++ b/vendor/github.com/docker/cli-docs-tool/.gitignore
@@ -0,0 +1,2 @@
+/coverage.txt
+/example/docs
diff --git a/vendor/github.com/docker/cli-docs-tool/.golangci.yml b/vendor/github.com/docker/cli-docs-tool/.golangci.yml
new file mode 100644
index 000000000000..6c6557176b28
--- /dev/null
+++ b/vendor/github.com/docker/cli-docs-tool/.golangci.yml
@@ -0,0 +1,37 @@
+run:
+ timeout: 10m
+
+linters:
+ enable:
+ - deadcode
+ - depguard
+ - gofmt
+ - goimports
+ - revive
+ - govet
+ - importas
+ - ineffassign
+ - misspell
+ - typecheck
+ - varcheck
+ - errname
+ - makezero
+ - whitespace
+ disable-all: true
+
+linters-settings:
+ depguard:
+ list-type: blacklist
+ include-go-root: true
+ packages:
+ # The io/ioutil package has been deprecated.
+ # https://go.dev/doc/go1.16#ioutil
+ - io/ioutil
+ importas:
+ no-unaliased: true
+
+issues:
+ exclude-rules:
+ - linters:
+ - revive
+ text: "stutters"
diff --git a/vendor/github.com/docker/cli-docs-tool/Dockerfile b/vendor/github.com/docker/cli-docs-tool/Dockerfile
new file mode 100644
index 000000000000..f0e2739faa7c
--- /dev/null
+++ b/vendor/github.com/docker/cli-docs-tool/Dockerfile
@@ -0,0 +1,86 @@
+# syntax=docker/dockerfile:1
+
+# Copyright 2021 cli-docs-tool authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG GO_VERSION="1.18"
+ARG GOLANGCI_LINT_VERSION="v1.45"
+ARG ADDLICENSE_VERSION="v1.0.0"
+
+ARG LICENSE_ARGS="-c cli-docs-tool -l apache"
+ARG LICENSE_FILES=".*\(Dockerfile\|\.go\|\.hcl\|\.sh\)"
+
+FROM golangci/golangci-lint:${GOLANGCI_LINT_VERSION}-alpine AS golangci-lint
+FROM ghcr.io/google/addlicense:${ADDLICENSE_VERSION} AS addlicense
+
+FROM golang:${GO_VERSION}-alpine AS base
+RUN apk add --no-cache cpio findutils git linux-headers
+ENV CGO_ENABLED=0
+WORKDIR /src
+
+FROM base AS vendored
+RUN --mount=type=bind,target=.,rw \
+ --mount=type=cache,target=/go/pkg/mod \
+ go mod tidy && go mod download && \
+ mkdir /out && cp go.mod go.sum /out
+
+FROM scratch AS vendor-update
+COPY --from=vendored /out /
+
+FROM vendored AS vendor-validate
+RUN --mount=type=bind,target=.,rw <<EOT
+set -e
+git add -A
+cp -rf /out/* .
+diff=$(git status --porcelain -- go.mod go.sum)
+if [ -n "$diff" ]; then
+ echo >&2 'ERROR: Vendor result differs. Please vendor your package with "docker buildx bake vendor"'
+ echo "$diff"
+ exit 1
+fi
+EOT
+
+FROM base AS lint
+RUN --mount=type=bind,target=. \
+ --mount=type=cache,target=/root/.cache \
+ --mount=from=golangci-lint,source=/usr/bin/golangci-lint,target=/usr/bin/golangci-lint \
+ golangci-lint run ./...
+
+FROM base AS license-set
+ARG LICENSE_ARGS
+ARG LICENSE_FILES
+RUN --mount=type=bind,target=.,rw \
+ --mount=from=addlicense,source=/app/addlicense,target=/usr/bin/addlicense \
+ find . -regex "${LICENSE_FILES}" | xargs addlicense ${LICENSE_ARGS} \
+ && mkdir /out \
+ && find . -regex "${LICENSE_FILES}" | cpio -pdm /out
+
+FROM scratch AS license-update
+COPY --from=set /out /
+
+FROM base AS license-validate
+ARG LICENSE_ARGS
+ARG LICENSE_FILES
+RUN --mount=type=bind,target=. \
+ --mount=from=addlicense,source=/app/addlicense,target=/usr/bin/addlicense \
+ find . -regex "${LICENSE_FILES}" | xargs addlicense -check ${LICENSE_ARGS}
+
+FROM vendored AS test
+RUN --mount=type=bind,target=. \
+ --mount=type=cache,target=/root/.cache \
+ --mount=type=cache,target=/go/pkg/mod \
+ go test -v -coverprofile=/tmp/coverage.txt -covermode=atomic ./...
+
+FROM scratch AS test-coverage
+COPY --from=test /tmp/coverage.txt /coverage.txt
diff --git a/vendor/github.com/docker/cli-docs-tool/LICENSE b/vendor/github.com/docker/cli-docs-tool/LICENSE
new file mode 100644
index 000000000000..d64569567334
--- /dev/null
+++ b/vendor/github.com/docker/cli-docs-tool/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/cli-docs-tool/README.md b/vendor/github.com/docker/cli-docs-tool/README.md
new file mode 100644
index 000000000000..4d5ee6474f8f
--- /dev/null
+++ b/vendor/github.com/docker/cli-docs-tool/README.md
@@ -0,0 +1,67 @@
+[![PkgGoDev](https://img.shields.io/badge/go.dev-docs-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/docker/cli-docs-tool)
+[![Test Status](https://img.shields.io/github/actions/workflow/status/docker/cli-docs-tool/test.yml?branch=main&label=test&logo=github&style=flat-square)](https://github.com/docker/cli-docs-tool/actions?query=workflow%3Atest)
+[![Go Report Card](https://goreportcard.com/badge/github.com/docker/cli-docs-tool)](https://goreportcard.com/report/github.com/docker/cli-docs-tool)
+
+## About
+
+This is a library containing utilities to generate (reference) documentation
+for the [`docker` CLI](https://github.com/docker/cli) on [docs.docker.com](https://docs.docker.com/reference/).
+
+## Disclaimer
+
+This library is intended for use by Docker's CLIs, and is not intended to be a
+general-purpose utility. Various bits are hard-coded or make assumptions that
+are very specific to our use-case. Contributions are welcome, but we will not
+accept contributions to make this a general-purpose module.
+
+## Usage
+
+To generate the documentation it's recommended to do so using a Go submodule
+in your repository.
+
+We will use the example of `docker/buildx` and create a Go submodule in a
+`docs` folder (recommended):
+
+```console
+$ mkdir docs
+$ cd ./docs
+$ go mod init github.com/docker/buildx/docs
+$ go get github.com/docker/cli-docs-tool
+```
+
+Your `go.mod` should look like this:
+
+```text
+module github.com/docker/buildx/docs
+
+go 1.16
+
+require (
+ github.com/docker/cli-docs-tool v0.0.0
+)
+```
+
+Next, create a file named `main.go` inside that directory containing the
+following Go code from [`example/main.go`](example/main.go).
+
+Running this example should produce the following output:
+
+```console
+$ go run main.go
+INFO: Generating Markdown for "docker buildx bake"
+INFO: Generating Markdown for "docker buildx build"
+INFO: Generating Markdown for "docker buildx create"
+INFO: Generating Markdown for "docker buildx du"
+...
+INFO: Generating YAML for "docker buildx uninstall"
+INFO: Generating YAML for "docker buildx use"
+INFO: Generating YAML for "docker buildx version"
+INFO: Generating YAML for "docker buildx"
+```
+
+Generated docs will be available in the `./docs` folder of the project.
+
+## Contributing
+
+Want to contribute? Awesome! You can find information about contributing to
+this project in the [CONTRIBUTING.md](/.github/CONTRIBUTING.md)
diff --git a/vendor/github.com/docker/cli-docs-tool/annotation/annotation.go b/vendor/github.com/docker/cli-docs-tool/annotation/annotation.go
new file mode 100644
index 000000000000..021846af6e07
--- /dev/null
+++ b/vendor/github.com/docker/cli-docs-tool/annotation/annotation.go
@@ -0,0 +1,25 @@
+// Copyright 2021 cli-docs-tool authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package annotation
+
+const (
+ // ExternalURL specifies an external link annotation
+ ExternalURL = "docs.external.url"
+ // CodeDelimiter specifies the char that will be converted as code backtick.
+ // Can be used on cmd for inheritance or a specific flag.
+ CodeDelimiter = "docs.code-delimiter"
+ // DefaultValue specifies the default value for a flag.
+ DefaultValue = "docs.default-value"
+)
diff --git a/vendor/github.com/docker/cli-docs-tool/clidocstool.go b/vendor/github.com/docker/cli-docs-tool/clidocstool.go
new file mode 100644
index 000000000000..d4aeaba3f126
--- /dev/null
+++ b/vendor/github.com/docker/cli-docs-tool/clidocstool.go
@@ -0,0 +1,123 @@
+// Copyright 2017 cli-docs-tool authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clidocstool
+
+import (
+ "errors"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+// Options defines options for cli-docs-tool
+type Options struct {
+ Root *cobra.Command
+ SourceDir string
+ TargetDir string
+ Plugin bool
+}
+
+// Client represents an active cli-docs-tool object
+type Client struct {
+ root *cobra.Command
+ source string
+ target string
+ plugin bool
+}
+
+// New initializes a new cli-docs-tool client
+func New(opts Options) (*Client, error) {
+ if opts.Root == nil {
+ return nil, errors.New("root cmd required")
+ }
+ if len(opts.SourceDir) == 0 {
+ return nil, errors.New("source dir required")
+ }
+ c := &Client{
+ root: opts.Root,
+ source: opts.SourceDir,
+ plugin: opts.Plugin,
+ }
+ if len(opts.TargetDir) == 0 {
+ c.target = c.source
+ } else {
+ c.target = opts.TargetDir
+ }
+ if err := os.MkdirAll(c.target, 0755); err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+// GenAllTree creates all structured ref files for this command and
+// all descendants in the directory given.
+func (c *Client) GenAllTree() error {
+ var err error
+ if err = c.GenMarkdownTree(c.root); err != nil {
+ return err
+ }
+ if err = c.GenYamlTree(c.root); err != nil {
+ return err
+ }
+ return nil
+}
+
+func fileExists(f string) bool {
+ info, err := os.Stat(f)
+ if os.IsNotExist(err) {
+ return false
+ }
+ return !info.IsDir()
+}
+
+func copyFile(src string, dst string) error {
+ sf, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer sf.Close()
+ df, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY, 0o600)
+ if err != nil {
+ return err
+ }
+ defer df.Close()
+ _, err = io.Copy(df, sf)
+ return err
+}
+
+func getAliases(cmd *cobra.Command) []string {
+ if a := cmd.Annotations["aliases"]; a != "" {
+ aliases := strings.Split(a, ",")
+ for i := 0; i < len(aliases); i++ {
+ aliases[i] = strings.TrimSpace(aliases[i])
+ }
+ return aliases
+ }
+ if len(cmd.Aliases) == 0 {
+ return cmd.Aliases
+ }
+
+ var parentPath string
+ if cmd.HasParent() {
+ parentPath = cmd.Parent().CommandPath() + " "
+ }
+ aliases := []string{cmd.CommandPath()}
+ for _, a := range cmd.Aliases {
+ aliases = append(aliases, parentPath+a)
+ }
+ return aliases
+}
diff --git a/vendor/github.com/docker/cli-docs-tool/clidocstool_md.go b/vendor/github.com/docker/cli-docs-tool/clidocstool_md.go
new file mode 100644
index 000000000000..1dee58c06ca1
--- /dev/null
+++ b/vendor/github.com/docker/cli-docs-tool/clidocstool_md.go
@@ -0,0 +1,280 @@
+// Copyright 2021 cli-docs-tool authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clidocstool
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "text/tabwriter"
+ "text/template"
+
+ "github.com/docker/cli-docs-tool/annotation"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+)
+
+var (
+ nlRegexp = regexp.MustCompile(`\r?\n`)
+ adjustSep = regexp.MustCompile(`\|:---(\s+)`)
+)
+
+// GenMarkdownTree will generate a markdown page for this command and all
+// descendants in the directory given.
+func (c *Client) GenMarkdownTree(cmd *cobra.Command) error {
+ for _, sc := range cmd.Commands() {
+ if err := c.GenMarkdownTree(sc); err != nil {
+ return err
+ }
+ }
+
+ // always disable the addition of [flags] to the usage
+ cmd.DisableFlagsInUseLine = true
+
+ // Skip the root command altogether, to prevent generating a useless
+ // md file for plugins.
+ if c.plugin && !cmd.HasParent() {
+ return nil
+ }
+
+ // Skip hidden command
+ if cmd.Hidden {
+ log.Printf("INFO: Skipping Markdown for %q (hidden command)", cmd.CommandPath())
+ return nil
+ }
+
+ log.Printf("INFO: Generating Markdown for %q", cmd.CommandPath())
+ mdFile := mdFilename(cmd)
+ sourcePath := filepath.Join(c.source, mdFile)
+ targetPath := filepath.Join(c.target, mdFile)
+
+ // check recursively to handle inherited annotations
+ for curr := cmd; curr != nil; curr = curr.Parent() {
+ if _, ok := cmd.Annotations[annotation.CodeDelimiter]; !ok {
+ if cd, cok := curr.Annotations[annotation.CodeDelimiter]; cok {
+ if cmd.Annotations == nil {
+ cmd.Annotations = map[string]string{}
+ }
+ cmd.Annotations[annotation.CodeDelimiter] = cd
+ }
+ }
+ }
+
+ if !fileExists(sourcePath) {
+ var icBuf bytes.Buffer
+ icTpl, err := template.New("ic").Option("missingkey=error").Parse(`# {{ .Command }}
+
+<!---MARKER_GEN_START-->
+<!---MARKER_GEN_END-->
+
+`)
+ if err != nil {
+ return err
+ }
+ if err = icTpl.Execute(&icBuf, struct {
+ Command string
+ }{
+ Command: cmd.CommandPath(),
+ }); err != nil {
+ return err
+ }
+ if err = os.WriteFile(targetPath, icBuf.Bytes(), 0644); err != nil {
+ return err
+ }
+ } else if err := copyFile(sourcePath, targetPath); err != nil {
+ return err
+ }
+
+ content, err := os.ReadFile(targetPath)
+ if err != nil {
+ return err
+ }
+
+ cs := string(content)
+
+ start := strings.Index(cs, "<!---MARKER_GEN_START-->")
+ end := strings.Index(cs, "<!---MARKER_GEN_END-->")
+
+ if start == -1 {
+ return fmt.Errorf("no start marker in %s", mdFile)
+ }
+ if end == -1 {
+ return fmt.Errorf("no end marker in %s", mdFile)
+ }
+
+ out, err := mdCmdOutput(cmd, cs)
+ if err != nil {
+ return err
+ }
+ cont := cs[:start] + "<!---MARKER_GEN_START-->" + "\n" + out + "\n" + cs[end:]
+
+ fi, err := os.Stat(targetPath)
+ if err != nil {
+ return err
+ }
+ if err = os.WriteFile(targetPath, []byte(cont), fi.Mode()); err != nil {
+ return fmt.Errorf("failed to write %s: %w", targetPath, err)
+ }
+
+ return nil
+}
+
+func mdFilename(cmd *cobra.Command) string {
+ name := cmd.CommandPath()
+ if i := strings.Index(name, " "); i >= 0 {
+ name = name[i+1:]
+ }
+ return strings.ReplaceAll(name, " ", "_") + ".md"
+}
+
+func mdMakeLink(txt, link string, f *pflag.Flag, isAnchor bool) string {
+ link = "#" + link
+ annotations, ok := f.Annotations[annotation.ExternalURL]
+ if ok && len(annotations) > 0 {
+ link = annotations[0]
+ } else {
+ if !isAnchor {
+ return txt
+ }
+ }
+
+ return "[" + txt + "](" + link + ")"
+}
+
+type mdTable struct {
+ out *strings.Builder
+ tabWriter *tabwriter.Writer
+}
+
+func newMdTable(headers ...string) *mdTable {
+ w := &strings.Builder{}
+ t := &mdTable{
+ out: w,
+ // Using tabwriter.Debug, which uses "|" as separator instead of tabs,
+ // which is what we want. It's a bit of a hack, but does the job :)
+ tabWriter: tabwriter.NewWriter(w, 5, 5, 1, ' ', tabwriter.Debug),
+ }
+ t.addHeader(headers...)
+ return t
+}
+
+func (t *mdTable) addHeader(cols ...string) {
+ t.AddRow(cols...)
+ _, _ = t.tabWriter.Write([]byte("|" + strings.Repeat(":---\t", len(cols)) + "\n"))
+}
+
+func (t *mdTable) AddRow(cols ...string) {
+ for i := range cols {
+ cols[i] = mdEscapePipe(cols[i])
+ }
+ _, _ = t.tabWriter.Write([]byte("| " + strings.Join(cols, "\t ") + "\t\n"))
+}
+
+func (t *mdTable) String() string {
+ _ = t.tabWriter.Flush()
+ return adjustSep.ReplaceAllStringFunc(t.out.String()+"\n", func(in string) string {
+ return strings.ReplaceAll(in, " ", "-")
+ })
+}
+
+func mdCmdOutput(cmd *cobra.Command, old string) (string, error) {
+ b := &strings.Builder{}
+
+ desc := cmd.Short
+ if cmd.Long != "" {
+ desc = cmd.Long
+ }
+ if desc != "" {
+ b.WriteString(desc + "\n\n")
+ }
+
+ if aliases := getAliases(cmd); len(aliases) != 0 {
+ b.WriteString("### Aliases\n\n")
+ b.WriteString("`" + strings.Join(aliases, "`, `") + "`")
+ b.WriteString("\n\n")
+ }
+
+ if len(cmd.Commands()) != 0 {
+ b.WriteString("### Subcommands\n\n")
+ table := newMdTable("Name", "Description")
+ for _, c := range cmd.Commands() {
+ if c.Hidden {
+ continue
+ }
+ table.AddRow(fmt.Sprintf("[`%s`](%s)", c.Name(), mdFilename(c)), c.Short)
+ }
+ b.WriteString(table.String() + "\n")
+ }
+
+ // add inherited flags before checking for flags availability
+ cmd.Flags().AddFlagSet(cmd.InheritedFlags())
+
+ if cmd.Flags().HasAvailableFlags() {
+ b.WriteString("### Options\n\n")
+ table := newMdTable("Name", "Type", "Default", "Description")
+ cmd.Flags().VisitAll(func(f *pflag.Flag) {
+ if f.Hidden {
+ return
+ }
+ isLink := strings.Contains(old, "<a name=\""+f.Name+"\"></a>")
+ var name string
+ if f.Shorthand != "" {
+ name = mdMakeLink("`-"+f.Shorthand+"`", f.Name, f, isLink)
+ name += ", "
+ }
+ name += mdMakeLink("`--"+f.Name+"`", f.Name, f, isLink)
+
+ var ftype string
+ if f.Value.Type() != "bool" {
+ ftype = "`" + f.Value.Type() + "`"
+ }
+
+ var defval string
+ if v, ok := f.Annotations[annotation.DefaultValue]; ok && len(v) > 0 {
+ defval = v[0]
+ if cd, ok := f.Annotations[annotation.CodeDelimiter]; ok {
+ defval = strings.ReplaceAll(defval, cd[0], "`")
+ } else if cd, ok := cmd.Annotations[annotation.CodeDelimiter]; ok {
+ defval = strings.ReplaceAll(defval, cd, "`")
+ }
+ } else if f.DefValue != "" && (f.Value.Type() != "bool" && f.DefValue != "true") && f.DefValue != "[]" {
+ defval = "`" + f.DefValue + "`"
+ }
+
+ usage := f.Usage
+ if cd, ok := f.Annotations[annotation.CodeDelimiter]; ok {
+ usage = strings.ReplaceAll(usage, cd[0], "`")
+ } else if cd, ok := cmd.Annotations[annotation.CodeDelimiter]; ok {
+ usage = strings.ReplaceAll(usage, cd, "`")
+ }
+ table.AddRow(name, ftype, defval, mdReplaceNewline(usage))
+ })
+ b.WriteString(table.String())
+ }
+
+ return b.String(), nil
+}
+
+func mdEscapePipe(s string) string {
+ return strings.ReplaceAll(s, `|`, `\|`)
+}
+
+func mdReplaceNewline(s string) string {
+ return nlRegexp.ReplaceAllString(s, "<br>")
+}
diff --git a/vendor/github.com/docker/cli-docs-tool/clidocstool_yaml.go b/vendor/github.com/docker/cli-docs-tool/clidocstool_yaml.go
new file mode 100644
index 000000000000..523524297af4
--- /dev/null
+++ b/vendor/github.com/docker/cli-docs-tool/clidocstool_yaml.go
@@ -0,0 +1,435 @@
+// Copyright 2017 cli-docs-tool authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clidocstool
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/docker/cli-docs-tool/annotation"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ "gopkg.in/yaml.v3"
+)
+
+type cmdOption struct {
+ Option string
+ Shorthand string `yaml:",omitempty"`
+ ValueType string `yaml:"value_type,omitempty"`
+ DefaultValue string `yaml:"default_value,omitempty"`
+ Description string `yaml:",omitempty"`
+ DetailsURL string `yaml:"details_url,omitempty"` // DetailsURL contains an anchor-id or link for more information on this flag
+ Deprecated bool
+ Hidden bool
+ MinAPIVersion string `yaml:"min_api_version,omitempty"`
+ Experimental bool
+ ExperimentalCLI bool
+ Kubernetes bool
+ Swarm bool
+ OSType string `yaml:"os_type,omitempty"`
+}
+
+type cmdDoc struct {
+ Name string `yaml:"command"`
+ SeeAlso []string `yaml:"parent,omitempty"`
+ Version string `yaml:"engine_version,omitempty"`
+ Aliases string `yaml:",omitempty"`
+ Short string `yaml:",omitempty"`
+ Long string `yaml:",omitempty"`
+ Usage string `yaml:",omitempty"`
+ Pname string `yaml:",omitempty"`
+ Plink string `yaml:",omitempty"`
+ Cname []string `yaml:",omitempty"`
+ Clink []string `yaml:",omitempty"`
+ Options []cmdOption `yaml:",omitempty"`
+ InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"`
+ Example string `yaml:"examples,omitempty"`
+ Deprecated bool
+ Hidden bool
+ MinAPIVersion string `yaml:"min_api_version,omitempty"`
+ Experimental bool
+ ExperimentalCLI bool
+ Kubernetes bool
+ Swarm bool
+ OSType string `yaml:"os_type,omitempty"`
+}
+
+// GenYamlTree creates yaml structured ref files for this command and all descendants
+// in the directory given. This function may not work
+// correctly if your command names have `-` in them. If you have `cmd` with two
+// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third`
+// it is undefined which help output will be in the file `cmd-sub-third.1`.
+func (c *Client) GenYamlTree(cmd *cobra.Command) error {
+ emptyStr := func(s string) string { return "" }
+ if err := c.loadLongDescription(cmd); err != nil {
+ return err
+ }
+ return c.genYamlTreeCustom(cmd, emptyStr)
+}
+
+// genYamlTreeCustom creates yaml structured ref files.
+func (c *Client) genYamlTreeCustom(cmd *cobra.Command, filePrepender func(string) string) error {
+ for _, sc := range cmd.Commands() {
+ if !sc.Runnable() && !sc.HasAvailableSubCommands() {
+ // skip non-runnable commands without subcommands
+ // but *do* generate YAML for hidden and deprecated commands
+ // the YAML will have those included as metadata, so that the
+ // documentation repository can decide whether or not to present them
+ continue
+ }
+ if err := c.genYamlTreeCustom(sc, filePrepender); err != nil {
+ return err
+ }
+ }
+
+ // always disable the addition of [flags] to the usage
+ cmd.DisableFlagsInUseLine = true
+
+ // The "root" command used in the generator is just a "stub", and only has a
+ // list of subcommands, but not (e.g.) global options/flags. We should fix
+ // that, so that the YAML file for the docker "root" command contains the
+ // global flags.
+
+ // Skip the root command altogether, to prevent generating a useless
+ // YAML file for plugins.
+ if c.plugin && !cmd.HasParent() {
+ return nil
+ }
+
+ log.Printf("INFO: Generating YAML for %q", cmd.CommandPath())
+ basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".yaml"
+ target := filepath.Join(c.target, basename)
+ f, err := os.Create(target)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ if _, err := io.WriteString(f, filePrepender(target)); err != nil {
+ return err
+ }
+ return c.genYamlCustom(cmd, f)
+}
+
+// genYamlCustom creates custom yaml output.
+// nolint: gocyclo
+func (c *Client) genYamlCustom(cmd *cobra.Command, w io.Writer) error {
+ const (
+ // shortMaxWidth is the maximum width for the "Short" description before
+ // we force YAML to use multi-line syntax. The goal is to make the total
+ // width fit within 80 characters. This value is based on 80 characters
+ // minus the with of the field, colon, and whitespace ('short: ').
+ shortMaxWidth = 73
+
+ // longMaxWidth is the maximum width for the "Short" description before
+ // we force YAML to use multi-line syntax. The goal is to make the total
+ // width fit within 80 characters. This value is based on 80 characters
+ // minus the with of the field, colon, and whitespace ('long: ').
+ longMaxWidth = 74
+ )
+
+ // necessary to add inherited flags otherwise some
+ // fields are not properly declared like usage
+ cmd.Flags().AddFlagSet(cmd.InheritedFlags())
+
+ cliDoc := cmdDoc{
+ Name: cmd.CommandPath(),
+ Aliases: strings.Join(getAliases(cmd), ", "),
+ Short: forceMultiLine(cmd.Short, shortMaxWidth),
+ Long: forceMultiLine(cmd.Long, longMaxWidth),
+ Example: cmd.Example,
+ Deprecated: len(cmd.Deprecated) > 0,
+ Hidden: cmd.Hidden,
+ }
+
+ if len(cliDoc.Long) == 0 {
+ cliDoc.Long = cliDoc.Short
+ }
+
+ if cmd.Runnable() {
+ cliDoc.Usage = cmd.UseLine()
+ }
+
+ // check recursively to handle inherited annotations
+ for curr := cmd; curr != nil; curr = curr.Parent() {
+ if v, ok := curr.Annotations["version"]; ok && cliDoc.MinAPIVersion == "" {
+ cliDoc.MinAPIVersion = v
+ }
+ if _, ok := curr.Annotations["experimental"]; ok && !cliDoc.Experimental {
+ cliDoc.Experimental = true
+ }
+ if _, ok := curr.Annotations["experimentalCLI"]; ok && !cliDoc.ExperimentalCLI {
+ cliDoc.ExperimentalCLI = true
+ }
+ if _, ok := curr.Annotations["kubernetes"]; ok && !cliDoc.Kubernetes {
+ cliDoc.Kubernetes = true
+ }
+ if _, ok := curr.Annotations["swarm"]; ok && !cliDoc.Swarm {
+ cliDoc.Swarm = true
+ }
+ if o, ok := curr.Annotations["ostype"]; ok && cliDoc.OSType == "" {
+ cliDoc.OSType = o
+ }
+ if _, ok := cmd.Annotations[annotation.CodeDelimiter]; !ok {
+ if cd, cok := curr.Annotations[annotation.CodeDelimiter]; cok {
+ if cmd.Annotations == nil {
+ cmd.Annotations = map[string]string{}
+ }
+ cmd.Annotations[annotation.CodeDelimiter] = cd
+ }
+ }
+ }
+
+ anchors := make(map[string]struct{})
+ if a, ok := cmd.Annotations["anchors"]; ok && a != "" {
+ for _, anchor := range strings.Split(a, ",") {
+ anchors[anchor] = struct{}{}
+ }
+ }
+
+ flags := cmd.NonInheritedFlags()
+ if flags.HasFlags() {
+ cliDoc.Options = genFlagResult(cmd, flags, anchors)
+ }
+ flags = cmd.InheritedFlags()
+ if flags.HasFlags() {
+ cliDoc.InheritedOptions = genFlagResult(cmd, flags, anchors)
+ }
+
+ if hasSeeAlso(cmd) {
+ if cmd.HasParent() {
+ parent := cmd.Parent()
+ cliDoc.Pname = parent.CommandPath()
+ cliDoc.Plink = strings.Replace(cliDoc.Pname, " ", "_", -1) + ".yaml"
+ cmd.VisitParents(func(c *cobra.Command) {
+ if c.DisableAutoGenTag {
+ cmd.DisableAutoGenTag = c.DisableAutoGenTag
+ }
+ })
+ }
+
+ children := cmd.Commands()
+ sort.Sort(byName(children))
+
+ for _, child := range children {
+ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
+ continue
+ }
+ cliDoc.Cname = append(cliDoc.Cname, cliDoc.Name+" "+child.Name())
+ cliDoc.Clink = append(cliDoc.Clink, strings.Replace(cliDoc.Name+"_"+child.Name(), " ", "_", -1)+".yaml")
+ }
+ }
+
+ final, err := yaml.Marshal(&cliDoc)
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ if _, err := fmt.Fprintln(w, string(final)); err != nil {
+ return err
+ }
+ return nil
+}
+
+func genFlagResult(cmd *cobra.Command, flags *pflag.FlagSet, anchors map[string]struct{}) []cmdOption {
+ var (
+ result []cmdOption
+ opt cmdOption
+ )
+
+ const (
+ // shortMaxWidth is the maximum width for the "Short" description before
+ // we force YAML to use multi-line syntax. The goal is to make the total
+ // width fit within 80 characters. This value is based on 80 characters
+ // minus the with of the field, colon, and whitespace (' default_value: ').
+ defaultValueMaxWidth = 64
+
+ // longMaxWidth is the maximum width for the "Short" description before
+ // we force YAML to use multi-line syntax. The goal is to make the total
+ // width fit within 80 characters. This value is based on 80 characters
+ // minus the with of the field, colon, and whitespace (' description: ').
+ descriptionMaxWidth = 66
+ )
+
+ flags.VisitAll(func(flag *pflag.Flag) {
+ opt = cmdOption{
+ Option: flag.Name,
+ ValueType: flag.Value.Type(),
+ Deprecated: len(flag.Deprecated) > 0,
+ Hidden: flag.Hidden,
+ }
+
+ var defval string
+ if v, ok := flag.Annotations[annotation.DefaultValue]; ok && len(v) > 0 {
+ defval = v[0]
+ if cd, ok := flag.Annotations[annotation.CodeDelimiter]; ok {
+ defval = strings.ReplaceAll(defval, cd[0], "`")
+ } else if cd, ok := cmd.Annotations[annotation.CodeDelimiter]; ok {
+ defval = strings.ReplaceAll(defval, cd, "`")
+ }
+ } else {
+ defval = flag.DefValue
+ }
+ opt.DefaultValue = forceMultiLine(defval, defaultValueMaxWidth)
+
+ usage := flag.Usage
+ if cd, ok := flag.Annotations[annotation.CodeDelimiter]; ok {
+ usage = strings.ReplaceAll(usage, cd[0], "`")
+ } else if cd, ok := cmd.Annotations[annotation.CodeDelimiter]; ok {
+ usage = strings.ReplaceAll(usage, cd, "`")
+ }
+ opt.Description = forceMultiLine(usage, descriptionMaxWidth)
+
+ if v, ok := flag.Annotations[annotation.ExternalURL]; ok && len(v) > 0 {
+ opt.DetailsURL = strings.TrimPrefix(v[0], "https://docs.docker.com")
+ } else if _, ok = anchors[flag.Name]; ok {
+ opt.DetailsURL = "#" + flag.Name
+ }
+
+ // Todo, when we mark a shorthand is deprecated, but specify an empty message.
+ // The flag.ShorthandDeprecated is empty as the shorthand is deprecated.
+ // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok.
+ if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 {
+ opt.Shorthand = flag.Shorthand
+ }
+ if _, ok := flag.Annotations["experimental"]; ok {
+ opt.Experimental = true
+ }
+ if _, ok := flag.Annotations["deprecated"]; ok {
+ opt.Deprecated = true
+ }
+ if v, ok := flag.Annotations["version"]; ok {
+ opt.MinAPIVersion = v[0]
+ }
+ if _, ok := flag.Annotations["experimentalCLI"]; ok {
+ opt.ExperimentalCLI = true
+ }
+ if _, ok := flag.Annotations["kubernetes"]; ok {
+ opt.Kubernetes = true
+ }
+ if _, ok := flag.Annotations["swarm"]; ok {
+ opt.Swarm = true
+ }
+
+ // Note that the annotation can have multiple ostypes set, however, multiple
+ // values are currently not used (and unlikely will).
+ //
+ // To simplify usage of the os_type property in the YAML, and for consistency
+ // with the same property for commands, we're only using the first ostype that's set.
+ if ostypes, ok := flag.Annotations["ostype"]; ok && len(opt.OSType) == 0 && len(ostypes) > 0 {
+ opt.OSType = ostypes[0]
+ }
+
+ result = append(result, opt)
+ })
+
+ return result
+}
+
+// forceMultiLine appends a newline (\n) to strings that are longer than max
+// to force the yaml lib to use block notation (https://yaml.org/spec/1.2/spec.html#Block)
+// instead of a single-line string with newlines and tabs encoded("string\nline1\nline2").
+//
+// This makes the generated YAML more readable, and easier to review changes.
+// max can be used to customize the width to keep the whole line < 80 chars.
+func forceMultiLine(s string, max int) string {
+ s = strings.TrimSpace(s)
+ if len(s) > max && !strings.Contains(s, "\n") {
+ s = s + "\n"
+ }
+ return s
+}
+
+// Small duplication for cobra utils
+func hasSeeAlso(cmd *cobra.Command) bool {
+ if cmd.HasParent() {
+ return true
+ }
+ for _, c := range cmd.Commands() {
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
+ continue
+ }
+ return true
+ }
+ return false
+}
+
+// loadLongDescription gets long descriptions and examples from markdown.
+func (c *Client) loadLongDescription(parentCmd *cobra.Command) error {
+ for _, cmd := range parentCmd.Commands() {
+ if cmd.HasSubCommands() {
+ if err := c.loadLongDescription(cmd); err != nil {
+ return err
+ }
+ }
+ name := cmd.CommandPath()
+ if i := strings.Index(name, " "); i >= 0 {
+ // remove root command / binary name
+ name = name[i+1:]
+ }
+ if name == "" {
+ continue
+ }
+ mdFile := strings.ReplaceAll(name, " ", "_") + ".md"
+ sourcePath := filepath.Join(c.source, mdFile)
+ content, err := os.ReadFile(sourcePath)
+ if os.IsNotExist(err) {
+ log.Printf("WARN: %s does not exist, skipping Markdown examples for YAML doc\n", mdFile)
+ continue
+ }
+ if err != nil {
+ return err
+ }
+ applyDescriptionAndExamples(cmd, string(content))
+ }
+ return nil
+}
+
+// applyDescriptionAndExamples fills in cmd.Long and cmd.Example with the
+// "Description" and "Examples" H2 sections in mdString (if present).
+func applyDescriptionAndExamples(cmd *cobra.Command, mdString string) {
+ sections := getSections(mdString)
+ var (
+ anchors []string
+ md string
+ )
+ if sections["description"] != "" {
+ md, anchors = cleanupMarkDown(sections["description"])
+ cmd.Long = md
+ anchors = append(anchors, md)
+ }
+ if sections["examples"] != "" {
+ md, anchors = cleanupMarkDown(sections["examples"])
+ cmd.Example = md
+ anchors = append(anchors, md)
+ }
+ if len(anchors) > 0 {
+ if cmd.Annotations == nil {
+ cmd.Annotations = make(map[string]string)
+ }
+ cmd.Annotations["anchors"] = strings.Join(anchors, ",")
+ }
+}
+
+type byName []*cobra.Command
+
+func (s byName) Len() int { return len(s) }
+func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
diff --git a/vendor/github.com/docker/cli-docs-tool/docker-bake.hcl b/vendor/github.com/docker/cli-docs-tool/docker-bake.hcl
new file mode 100644
index 000000000000..4a5f44f83018
--- /dev/null
+++ b/vendor/github.com/docker/cli-docs-tool/docker-bake.hcl
@@ -0,0 +1,51 @@
+// Copyright 2021 cli-docs-tool authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+group "default" {
+ targets = ["test"]
+}
+
+group "validate" {
+ targets = ["lint", "vendor-validate", "license-validate"]
+}
+
+target "lint" {
+ target = "lint"
+ output = ["type=cacheonly"]
+}
+
+target "vendor-validate" {
+ target = "vendor-validate"
+ output = ["type=cacheonly"]
+}
+
+target "vendor-update" {
+ target = "vendor-update"
+ output = ["."]
+}
+
+target "test" {
+ target = "test-coverage"
+ output = ["."]
+}
+
+target "license-validate" {
+ target = "license-validate"
+ output = ["type=cacheonly"]
+}
+
+target "license-update" {
+ target = "license-update"
+ output = ["."]
+}
diff --git a/vendor/github.com/docker/cli-docs-tool/markdown.go b/vendor/github.com/docker/cli-docs-tool/markdown.go
new file mode 100644
index 000000000000..32849236ed9c
--- /dev/null
+++ b/vendor/github.com/docker/cli-docs-tool/markdown.go
@@ -0,0 +1,87 @@
+// Copyright 2017 cli-docs-tool authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clidocstool
+
+import (
+ "regexp"
+ "strings"
+ "unicode"
+)
+
+var (
+ // mdHeading matches MarkDown H1..h6 headings. Note that this regex may produce
+ // false positives for (e.g.) comments in code-blocks (# this is a comment),
+ // so should not be used as a generic regex for other purposes.
+ mdHeading = regexp.MustCompile(`^([#]{1,6})\s(.*)$`)
+ // htmlAnchor matches inline HTML anchors. This is intended to only match anchors
+ // for our use-case; DO NOT consider using this as a generic regex, or at least
+ // not before reading https://stackoverflow.com/a/1732454/1811501.
+ htmlAnchor = regexp.MustCompile(`<a\s+(?:name|id)="?([^"]+)"?\s*></a>\s*`)
+)
+
+// getSections returns all H2 sections by title (lowercase)
+func getSections(mdString string) map[string]string {
+ parsedContent := strings.Split("\n"+mdString, "\n## ")
+ sections := make(map[string]string, len(parsedContent))
+ for _, s := range parsedContent {
+ if strings.HasPrefix(s, "#") {
+ // not a H2 Section
+ continue
+ }
+ parts := strings.SplitN(s, "\n", 2)
+ if len(parts) == 2 {
+ sections[strings.ToLower(parts[0])] = parts[1]
+ }
+ }
+ return sections
+}
+
+// cleanupMarkDown cleans up the MarkDown passed in mdString for inclusion in
+// YAML. It removes trailing whitespace and substitutes tabs for four spaces
+// to prevent YAML switching to use "compact" form; ("line1 \nline\t2\n")
+// which, although equivalent, is hard to read.
+func cleanupMarkDown(mdString string) (md string, anchors []string) {
+ // remove leading/trailing whitespace, and replace tabs in the whole content
+ mdString = strings.TrimSpace(mdString)
+ mdString = strings.ReplaceAll(mdString, "\t", "    ")
+ mdString = strings.ReplaceAll(mdString, "https://docs.docker.com", "")
+
+ var id string
+ // replace trailing whitespace per line, and handle custom anchors
+ lines := strings.Split(mdString, "\n")
+ for i := 0; i < len(lines); i++ {
+ lines[i] = strings.TrimRightFunc(lines[i], unicode.IsSpace)
+ lines[i], id = convertHTMLAnchor(lines[i])
+ if id != "" {
+ anchors = append(anchors, id)
+ }
+ }
+ return strings.Join(lines, "\n"), anchors
+}
+
+// convertHTMLAnchor converts inline anchor-tags in headings (<a name=myanchor></a>)
+// to an extended-markdown property ({#myanchor}). Extended Markdown properties
+// are not supported in GitHub Flavored Markdown, but are supported by Jekyll,
+// and lead to cleaner HTML in our docs, and prevents duplicate anchors.
+// It returns the converted MarkDown heading and the custom ID (if present)
+func convertHTMLAnchor(mdLine string) (md string, customID string) {
+ if m := mdHeading.FindStringSubmatch(mdLine); len(m) > 0 {
+ if a := htmlAnchor.FindStringSubmatch(m[2]); len(a) > 0 {
+ customID = a[1]
+ mdLine = m[1] + " " + htmlAnchor.ReplaceAllString(m[2], "") + " {#" + customID + "}"
+ }
+ }
+ return mdLine, customID
+}
diff --git a/vendor/github.com/russross/blackfriday/v2/.gitignore b/vendor/github.com/russross/blackfriday/v2/.gitignore
new file mode 100644
index 000000000000..75623dcccbb7
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/.gitignore
@@ -0,0 +1,8 @@
+*.out
+*.swp
+*.8
+*.6
+_obj
+_test*
+markdown
+tags
diff --git a/vendor/github.com/russross/blackfriday/v2/.travis.yml b/vendor/github.com/russross/blackfriday/v2/.travis.yml
new file mode 100644
index 000000000000..b0b525a5a8e1
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/.travis.yml
@@ -0,0 +1,17 @@
+sudo: false
+language: go
+go:
+ - "1.10.x"
+ - "1.11.x"
+ - tip
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
+install:
+ - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d -s .)
+ - go tool vet .
+ - go test -v ./...
diff --git a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt
new file mode 100644
index 000000000000..2885af3602d8
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt
@@ -0,0 +1,29 @@
+Blackfriday is distributed under the Simplified BSD License:
+
+> Copyright © 2011 Russ Ross
+> All rights reserved.
+>
+> Redistribution and use in source and binary forms, with or without
+> modification, are permitted provided that the following conditions
+> are met:
+>
+> 1. Redistributions of source code must retain the above copyright
+> notice, this list of conditions and the following disclaimer.
+>
+> 2. Redistributions in binary form must reproduce the above
+> copyright notice, this list of conditions and the following
+> disclaimer in the documentation and/or other materials provided with
+> the distribution.
+>
+> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+> POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md
new file mode 100644
index 000000000000..d9c08a22fc54
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/README.md
@@ -0,0 +1,335 @@
+Blackfriday
+[![Build Status][BuildV2SVG]][BuildV2URL]
+[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL]
+===========
+
+Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
+is paranoid about its input (so you can safely feed it user-supplied
+data), it is fast, it supports common extensions (tables, smart
+punctuation substitutions, etc.), and it is safe for all utf-8
+(unicode) input.
+
+HTML output is currently supported, along with Smartypants
+extensions.
+
+It started as a translation from C of [Sundown][3].
+
+
+Installation
+------------
+
+Blackfriday is compatible with modern Go releases in module mode.
+With Go installed:
+
+    go get github.com/russross/blackfriday/v2
+
+will resolve and add the package to the current development module,
+then build and install it. Alternatively, you can achieve the same
+if you import it in a package:
+
+    import "github.com/russross/blackfriday/v2"
+
+and `go get` without parameters.
+
+Legacy GOPATH mode is unsupported.
+
+
+Versions
+--------
+
+Currently maintained and recommended version of Blackfriday is `v2`. It's being
+developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the
+documentation is available at
+https://pkg.go.dev/github.com/russross/blackfriday/v2.
+
+It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`.
+
+Version 2 offers a number of improvements over v1:
+
+* Cleaned up API
+* A separate call to [`Parse`][4], which produces an abstract syntax tree for
+  the document
+* Latest bug fixes
+* Flexibility to easily add your own rendering extensions
+
+Potential drawbacks:
+
+* Our benchmarks show v2 to be slightly slower than v1. Currently in the
+  ballpark of around 15%.
+* API breakage. If you can't afford modifying your code to adhere to the new API
+  and don't care too much about the new features, v2 is probably not for you.
+* Several bug fixes are trailing behind and still need to be forward-ported to
+  v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
+  tracking.
+
+If you are still interested in the legacy `v1`, you can import it from
+`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found
+here: https://pkg.go.dev/github.com/russross/blackfriday.
+
+
+Usage
+-----
+
+For the most sensible markdown processing, it is as simple as getting your input
+into a byte slice and calling:
+
+```go
+output := blackfriday.Run(input)
+```
+
+Your input will be parsed and the output rendered with a set of most popular
+extensions enabled. If you want the most basic feature set, corresponding with
+the bare Markdown specification, use:
+
+```go
+output := blackfriday.Run(input, blackfriday.WithNoExtensions())
+```
+
+### Sanitize untrusted content
+
+Blackfriday itself does nothing to protect against malicious content. If you are
+dealing with user-supplied markdown, we recommend running Blackfriday's output
+through HTML sanitizer such as [Bluemonday][5].
+
+Here's an example of simple usage of Blackfriday together with Bluemonday:
+
+```go
+import (
+    "github.com/microcosm-cc/bluemonday"
+    "github.com/russross/blackfriday/v2"
+)
+
+// ...
+unsafe := blackfriday.Run(input)
+html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
+```
+
+### Custom options
+
+If you want to customize the set of options, use `blackfriday.WithExtensions`,
+`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`.
+
+### `blackfriday-tool`
+
+You can also check out `blackfriday-tool` for a more complete example
+of how to use it. Download and install it using:
+
+    go get github.com/russross/blackfriday-tool
+
+This is a simple command-line tool that allows you to process a
+markdown file using a standalone program. You can also browse the
+source directly on github if you are just looking for some example
+code:
+
+* <https://github.com/russross/blackfriday-tool>
+
+Note that if you have not already done so, installing
+`blackfriday-tool` will be sufficient to download and install
+blackfriday in addition to the tool itself. The tool binary will be
+installed in `$GOPATH/bin`. This is a statically-linked binary that
+can be copied to wherever you need it without worrying about
+dependencies and library versions.
+
+### Sanitized anchor names
+
+Blackfriday includes an algorithm for creating sanitized anchor names
+corresponding to a given input text. This algorithm is used to create
+anchors for headings when `AutoHeadingIDs` extension is enabled. The
+algorithm has a specification, so that other packages can create
+compatible anchor names and links to those anchors.
+
+The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names.
+
+[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to
+create compatible links to the anchor names generated by blackfriday.
+This algorithm is also implemented in a small standalone package at
+[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients
+that want a small package and don't need full functionality of blackfriday.
+
+
+Features
+--------
+
+All features of Sundown are supported, including:
+
+* **Compatibility**. The Markdown v1.0.3 test suite passes with
+  the `--tidy` option. Without `--tidy`, the differences are
+  mostly in whitespace and entity escaping, where blackfriday is
+  more consistent and cleaner.
+
+* **Common extensions**, including table support, fenced code
+  blocks, autolinks, strikethroughs, non-strict emphasis, etc.
+
+* **Safety**. Blackfriday is paranoid when parsing, making it safe
+  to feed untrusted user input without fear of bad things
+  happening. The test suite stress tests this and there are no
+  known inputs that make it crash. If you find one, please let me
+  know and send me the input that does it.
+
+  NOTE: "safety" in this context means *runtime safety only*. In order to
+  protect yourself against JavaScript injection in untrusted content, see
+  [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
+
+* **Fast processing**. It is fast enough to render on-demand in
+  most web applications without having to cache the output.
+
+* **Thread safety**. You can run multiple parsers in different
+  goroutines without ill effect. There is no dependence on global
+  shared state.
+
+* **Minimal dependencies**. Blackfriday only depends on standard
+  library packages in Go. The source code is pretty
+  self-contained, so it is easy to add to any project, including
+  Google App Engine projects.
+
+* **Standards compliant**. Output successfully validates using the
+  W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
+
+
+Extensions
+----------
+
+In addition to the standard markdown syntax, this package
+implements the following extensions:
+
+* **Intra-word emphasis supression**. The `_` character is
+  commonly used inside words when discussing code, so having
+  markdown interpret it as an emphasis command is usually the
+  wrong thing. Blackfriday lets you treat all emphasis markers as
+  normal characters when they occur inside a word.
+
+* **Tables**. Tables can be created by drawing them in the input
+  using a simple syntax:
+
+  ```
+  Name    | Age
+  --------|------
+  Bob     | 27
+  Alice   | 23
+  ```
+
+* **Fenced code blocks**. In addition to the normal 4-space
+  indentation to mark code blocks, you can explicitly mark them
+  and supply a language (to make syntax highlighting simple). Just
+  mark it like this:
+
+  ```go
+  func getTrue() bool {
+      return true
+  }
+  ```
+
+  You can use 3 or more backticks to mark the beginning of the
+  block, and the same number to mark the end of the block.
+
+  To preserve classes of fenced code blocks while using the bluemonday
+  HTML sanitizer, use the following policy:
+
+  ```go
+  p := bluemonday.UGCPolicy()
+  p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code")
+  html := p.SanitizeBytes(unsafe)
+  ```
+
+* **Definition lists**. A simple definition list is made of a single-line
+  term followed by a colon and the definition for that term.
+
+  Cat
+  : Fluffy animal everyone likes
+
+  Internet
+  : Vector of transmission for pictures of cats
+
+  Terms must be separated from the previous definition by a blank line.
+
+* **Footnotes**. A marker in the text that will become a superscript number;
+  a footnote definition that will be placed in a list of footnotes at the
+  end of the document. A footnote looks like this:
+
+  This is a footnote.[^1]
+
+  [^1]: the footnote text.
+
+* **Autolinking**. Blackfriday can find URLs that have not been
+  explicitly marked as links and turn them into links.
+
+* **Strikethrough**. Use two tildes (`~~`) to mark text that
+  should be crossed out.
+
+* **Hard line breaks**. With this extension enabled newlines in the input
+  translate into line breaks in the output. This extension is off by default.
+
+* **Smart quotes**. Smartypants-style punctuation substitution is
+  supported, turning normal double- and single-quote marks into
+  curly quotes, etc.
+
+* **LaTeX-style dash parsing** is an additional option, where `--`
+  is translated into `–`, and `---` is translated into
+  `—`. This differs from most smartypants processors, which
+  turn a single hyphen into an ndash and a double hyphen into an
+  mdash.
+
+* **Smart fractions**, where anything that looks like a fraction
+  is translated into suitable HTML (instead of just a few special
+  cases like most smartypant processors). For example, `4/5`
+  becomes `<sup>4</sup>⁄<sub>5</sub>`, which renders as
+  <sup>4</sup>⁄<sub>5</sub>.
+
+
+Other renderers
+---------------
+
+Blackfriday is structured to allow alternative rendering engines. Here
+are a few of note:
+
+* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown):
+  provides a GitHub Flavored Markdown renderer with fenced code block
+  highlighting, clickable heading anchor links.
+
+  It's not customizable, and its goal is to produce HTML output
+  equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
+  except the rendering is performed locally.
+
+* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
+  but for markdown.
+
+* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex):
+  renders output as LaTeX.
+
+* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience
+  integration with the [Chroma](https://github.com/alecthomas/chroma) code
+  highlighting library. bfchroma is only compatible with v2 of Blackfriday and
+  provides a drop-in renderer ready to use with Blackfriday, as well as
+  options and means for further customization.
+
+* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer.
+
+* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style
+
+
+TODO
+----
+
+* More unit testing
+* Improve Unicode support. It does not understand all Unicode
+  rules (about what constitutes a letter, a punctuation symbol,
+  etc.), so it may fail to detect word boundaries correctly in
+  some instances. It is safe on all UTF-8 input.
+
+
+License
+-------
+
+[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
+
+
+   [1]: https://daringfireball.net/projects/markdown/ "Markdown"
+   [2]: https://golang.org/ "Go Language"
+   [3]: https://github.com/vmg/sundown "Sundown"
+   [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func"
+   [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
+
+  [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2
+  [BuildV2URL]: https://travis-ci.org/russross/blackfriday
+  [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2
+  [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2
diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go
new file mode 100644
index 000000000000..dcd61e6e35bc
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/block.go
@@ -0,0 +1,1612 @@
+//
|
||
+// Blackfriday Markdown Processor
|
||
+// Available at http://github.com/russross/blackfriday
|
||
+//
|
||
+// Copyright © 2011 Russ Ross <russ@russross.com>.
|
||
+// Distributed under the Simplified BSD License.
|
||
+// See README.md for details.
|
||
+//
|
||
+
|
||
+//
|
||
+// Functions to parse block-level elements.
|
||
+//
|
||
+
|
||
+package blackfriday
|
||
+
|
||
+import (
|
||
+ "bytes"
|
||
+ "html"
|
||
+ "regexp"
|
||
+ "strings"
|
||
+ "unicode"
|
||
+)
|
||
+
|
||
+const (
|
||
+ charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});"
|
||
+ escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]"
|
||
+)
|
||
+
|
||
+var (
|
||
+ reBackslashOrAmp = regexp.MustCompile("[\\&]")
|
||
+ reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity)
|
||
+)
|
||
+
|
||
+// Parse block-level data.
|
||
+// Note: this function and many that it calls assume that
|
||
+// the input buffer ends with a newline.
|
||
+func (p *Markdown) block(data []byte) {
|
||
+ // this is called recursively: enforce a maximum depth
|
||
+ if p.nesting >= p.maxNesting {
|
||
+ return
|
||
+ }
|
||
+ p.nesting++
|
||
+
|
||
+ // parse out one block-level construct at a time
|
||
+ for len(data) > 0 {
|
||
+ // prefixed heading:
|
||
+ //
|
||
+ // # Heading 1
|
||
+ // ## Heading 2
|
||
+ // ...
|
||
+ // ###### Heading 6
|
||
+ if p.isPrefixHeading(data) {
|
||
+ data = data[p.prefixHeading(data):]
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // block of preformatted HTML:
|
||
+ //
|
||
+ // <div>
|
||
+ // ...
|
||
+ // </div>
|
||
+ if data[0] == '<' {
|
||
+ if i := p.html(data, true); i > 0 {
|
||
+ data = data[i:]
|
||
+ continue
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // title block
|
||
+ //
|
||
+ // % stuff
|
||
+ // % more stuff
|
||
+ // % even more stuff
|
||
+ if p.extensions&Titleblock != 0 {
|
||
+ if data[0] == '%' {
|
||
+ if i := p.titleBlock(data, true); i > 0 {
|
||
+ data = data[i:]
|
||
+ continue
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // blank lines. note: returns the # of bytes to skip
|
||
+ if i := p.isEmpty(data); i > 0 {
|
||
+ data = data[i:]
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // indented code block:
|
||
+ //
|
||
+ // func max(a, b int) int {
|
||
+ // if a > b {
|
||
+ // return a
|
||
+ // }
|
||
+ // return b
|
||
+ // }
|
||
+ if p.codePrefix(data) > 0 {
|
||
+ data = data[p.code(data):]
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // fenced code block:
|
||
+ //
|
||
+ // ``` go
|
||
+ // func fact(n int) int {
|
||
+ // if n <= 1 {
|
||
+ // return n
|
||
+ // }
|
||
+ // return n * fact(n-1)
|
||
+ // }
|
||
+ // ```
|
||
+ if p.extensions&FencedCode != 0 {
|
||
+ if i := p.fencedCodeBlock(data, true); i > 0 {
|
||
+ data = data[i:]
|
||
+ continue
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // horizontal rule:
|
||
+ //
|
||
+ // ------
|
||
+ // or
|
||
+ // ******
|
||
+ // or
|
||
+ // ______
|
||
+ if p.isHRule(data) {
|
||
+ p.addBlock(HorizontalRule, nil)
|
||
+ var i int
|
||
+ for i = 0; i < len(data) && data[i] != '\n'; i++ {
|
||
+ }
|
||
+ data = data[i:]
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // block quote:
|
||
+ //
|
||
+ // > A big quote I found somewhere
|
||
+ // > on the web
|
||
+ if p.quotePrefix(data) > 0 {
|
||
+ data = data[p.quote(data):]
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // table:
|
||
+ //
|
||
+ // Name | Age | Phone
|
||
+ // ------|-----|---------
|
||
+ // Bob | 31 | 555-1234
|
||
+ // Alice | 27 | 555-4321
|
||
+ if p.extensions&Tables != 0 {
|
||
+ if i := p.table(data); i > 0 {
|
||
+ data = data[i:]
|
||
+ continue
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // an itemized/unordered list:
|
||
+ //
|
||
+ // * Item 1
|
||
+ // * Item 2
|
||
+ //
|
||
+ // also works with + or -
|
||
+ if p.uliPrefix(data) > 0 {
|
||
+ data = data[p.list(data, 0):]
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // a numbered/ordered list:
|
||
+ //
|
||
+ // 1. Item 1
|
||
+ // 2. Item 2
|
||
+ if p.oliPrefix(data) > 0 {
|
||
+ data = data[p.list(data, ListTypeOrdered):]
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // definition lists:
|
||
+ //
|
||
+ // Term 1
|
||
+ // : Definition a
|
||
+ // : Definition b
|
||
+ //
|
||
+ // Term 2
|
||
+ // : Definition c
|
||
+ if p.extensions&DefinitionLists != 0 {
|
||
+ if p.dliPrefix(data) > 0 {
|
||
+ data = data[p.list(data, ListTypeDefinition):]
|
||
+ continue
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // anything else must look like a normal paragraph
|
||
+ // note: this finds underlined headings, too
|
||
+ data = data[p.paragraph(data):]
|
||
+ }
|
||
+
|
||
+ p.nesting--
|
||
+}
|
||
+
|
||
+func (p *Markdown) addBlock(typ NodeType, content []byte) *Node {
|
||
+ p.closeUnmatchedBlocks()
|
||
+ container := p.addChild(typ, 0)
|
||
+ container.content = content
|
||
+ return container
|
||
+}
|
||
+
|
||
+func (p *Markdown) isPrefixHeading(data []byte) bool {
|
||
+ if data[0] != '#' {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ if p.extensions&SpaceHeadings != 0 {
|
||
+ level := 0
|
||
+ for level < 6 && level < len(data) && data[level] == '#' {
|
||
+ level++
|
||
+ }
|
||
+ if level == len(data) || data[level] != ' ' {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+func (p *Markdown) prefixHeading(data []byte) int {
|
||
+ level := 0
|
||
+ for level < 6 && level < len(data) && data[level] == '#' {
|
||
+ level++
|
||
+ }
|
||
+ i := skipChar(data, level, ' ')
|
||
+ end := skipUntilChar(data, i, '\n')
|
||
+ skip := end
|
||
+ id := ""
|
||
+ if p.extensions&HeadingIDs != 0 {
|
||
+ j, k := 0, 0
|
||
+ // find start/end of heading id
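+ // (with the HeadingIDs extension, "## Title {#custom-id}" yields the id "custom-id")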
|
||
+ for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
|
||
+ }
|
||
+ for k = j + 1; k < end && data[k] != '}'; k++ {
|
||
+ }
|
||
+ // extract heading id iff found
|
||
+ if j < end && k < end {
|
||
+ id = string(data[j+2 : k])
|
||
+ end = j
|
||
+ skip = k + 1
|
||
+ for end > 0 && data[end-1] == ' ' {
|
||
+ end--
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ for end > 0 && data[end-1] == '#' {
|
||
+ if isBackslashEscaped(data, end-1) {
|
||
+ break
|
||
+ }
|
||
+ end--
|
||
+ }
|
||
+ for end > 0 && data[end-1] == ' ' {
|
||
+ end--
|
||
+ }
|
||
+ if end > i {
|
||
+ if id == "" && p.extensions&AutoHeadingIDs != 0 {
|
||
+ id = SanitizedAnchorName(string(data[i:end]))
|
||
+ }
|
||
+ block := p.addBlock(Heading, data[i:end])
|
||
+ block.HeadingID = id
|
||
+ block.Level = level
|
||
+ }
|
||
+ return skip
|
||
+}
|
||
+
|
||
+func (p *Markdown) isUnderlinedHeading(data []byte) int {
|
||
+ // test of level 1 heading
|
||
+ if data[0] == '=' {
|
||
+ i := skipChar(data, 1, '=')
|
||
+ i = skipChar(data, i, ' ')
|
||
+ if i < len(data) && data[i] == '\n' {
|
||
+ return 1
|
||
+ }
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ // test of level 2 heading
|
||
+ if data[0] == '-' {
|
||
+ i := skipChar(data, 1, '-')
|
||
+ i = skipChar(data, i, ' ')
|
||
+ if i < len(data) && data[i] == '\n' {
|
||
+ return 2
|
||
+ }
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (p *Markdown) titleBlock(data []byte, doRender bool) int {
|
||
+ if data[0] != '%' {
|
||
+ return 0
|
||
+ }
|
||
+ splitData := bytes.Split(data, []byte("\n"))
|
||
+ var i int
|
||
+ for idx, b := range splitData {
|
||
+ if !bytes.HasPrefix(b, []byte("%")) {
|
||
+ i = idx // - 1
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+
|
||
+ data = bytes.Join(splitData[0:i], []byte("\n"))
|
||
+ consumed := len(data)
|
||
+ data = bytes.TrimPrefix(data, []byte("% "))
|
||
+ data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1)
|
||
+ block := p.addBlock(Heading, data)
|
||
+ block.Level = 1
|
||
+ block.IsTitleblock = true
|
||
+
|
||
+ return consumed
|
||
+}
|
||
+
|
||
+func (p *Markdown) html(data []byte, doRender bool) int {
|
||
+ var i, j int
|
||
+
|
||
+ // identify the opening tag
|
||
+ if data[0] != '<' {
|
||
+ return 0
|
||
+ }
|
||
+ curtag, tagfound := p.htmlFindTag(data[1:])
|
||
+
|
||
+ // handle special cases
|
||
+ if !tagfound {
|
||
+ // check for an HTML comment
|
||
+ if size := p.htmlComment(data, doRender); size > 0 {
|
||
+ return size
|
||
+ }
|
||
+
|
||
+ // check for an <hr> tag
|
||
+ if size := p.htmlHr(data, doRender); size > 0 {
|
||
+ return size
|
||
+ }
|
||
+
|
||
+ // no special case recognized
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ // look for an unindented matching closing tag
|
||
+ // followed by a blank line
|
||
+ found := false
|
||
+ /*
|
||
+ closetag := []byte("\n</" + curtag + ">")
|
||
+ j = len(curtag) + 1
|
||
+ for !found {
|
||
+ // scan for a closing tag at the beginning of a line
|
||
+ if skip := bytes.Index(data[j:], closetag); skip >= 0 {
|
||
+ j += skip + len(closetag)
|
||
+ } else {
|
||
+ break
|
||
+ }
|
||
+
|
||
+ // see if it is the only thing on the line
|
||
+ if skip := p.isEmpty(data[j:]); skip > 0 {
|
||
+ // see if it is followed by a blank line/eof
|
||
+ j += skip
|
||
+ if j >= len(data) {
|
||
+ found = true
|
||
+ i = j
|
||
+ } else {
|
||
+ if skip := p.isEmpty(data[j:]); skip > 0 {
|
||
+ j += skip
|
||
+ found = true
|
||
+ i = j
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ */
|
||
+
|
||
+ // if not found, try a second pass looking for indented match
|
||
+ // but not if tag is "ins" or "del" (following original Markdown.pl)
|
||
+ if !found && curtag != "ins" && curtag != "del" {
|
||
+ i = 1
|
||
+ for i < len(data) {
|
||
+ i++
|
||
+ for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ if i+2+len(curtag) >= len(data) {
|
||
+ break
|
||
+ }
|
||
+
|
||
+ j = p.htmlFindEnd(curtag, data[i-1:])
|
||
+
|
||
+ if j > 0 {
|
||
+ i += j - 1
|
||
+ found = true
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if !found {
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ // the end of the block has been found
|
||
+ if doRender {
|
||
+ // trim newlines
|
||
+ end := i
|
||
+ for end > 0 && data[end-1] == '\n' {
|
||
+ end--
|
||
+ }
|
||
+ finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
|
||
+ }
|
||
+
|
||
+ return i
|
||
+}
|
||
+
|
||
+func finalizeHTMLBlock(block *Node) {
|
||
+ block.Literal = block.content
|
||
+ block.content = nil
|
||
+}
|
||
+
|
||
+// HTML comment, lax form
|
||
+func (p *Markdown) htmlComment(data []byte, doRender bool) int {
|
||
+ i := p.inlineHTMLComment(data)
|
||
+ // needs to end with a blank line
|
||
+ if j := p.isEmpty(data[i:]); j > 0 {
|
||
+ size := i + j
|
||
+ if doRender {
|
||
+ // trim trailing newlines
|
||
+ end := size
|
||
+ for end > 0 && data[end-1] == '\n' {
|
||
+ end--
|
||
+ }
|
||
+ block := p.addBlock(HTMLBlock, data[:end])
|
||
+ finalizeHTMLBlock(block)
|
||
+ }
|
||
+ return size
|
||
+ }
|
||
+ return 0
|
||
+}
|
||
+
|
||
+// HR, which is the only self-closing block tag considered
|
||
+func (p *Markdown) htmlHr(data []byte, doRender bool) int {
|
||
+ if len(data) < 4 {
|
||
+ return 0
|
||
+ }
|
||
+ if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
|
||
+ return 0
|
||
+ }
|
||
+ if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
|
||
+ // not an <hr> tag after all; at least not a valid one
|
||
+ return 0
|
||
+ }
|
||
+ i := 3
|
||
+ for i < len(data) && data[i] != '>' && data[i] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+ if i < len(data) && data[i] == '>' {
|
||
+ i++
|
||
+ if j := p.isEmpty(data[i:]); j > 0 {
|
||
+ size := i + j
|
||
+ if doRender {
|
||
+ // trim newlines
|
||
+ end := size
|
||
+ for end > 0 && data[end-1] == '\n' {
|
||
+ end--
|
||
+ }
|
||
+ finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
|
||
+ }
|
||
+ return size
|
||
+ }
|
||
+ }
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (p *Markdown) htmlFindTag(data []byte) (string, bool) {
|
||
+ i := 0
|
||
+ for i < len(data) && isalnum(data[i]) {
|
||
+ i++
|
||
+ }
|
||
+ key := string(data[:i])
|
||
+ if _, ok := blockTags[key]; ok {
|
||
+ return key, true
|
||
+ }
|
||
+ return "", false
|
||
+}
|
||
+
|
||
+func (p *Markdown) htmlFindEnd(tag string, data []byte) int {
|
||
+ // assume data[0] == '<' && data[1] == '/' already tested
|
||
+ if tag == "hr" {
|
||
+ return 2
|
||
+ }
|
||
+ // check if tag is a match
|
||
+ closetag := []byte("</" + tag + ">")
|
||
+ if !bytes.HasPrefix(data, closetag) {
|
||
+ return 0
|
||
+ }
|
||
+ i := len(closetag)
|
||
+
|
||
+ // check that the rest of the line is blank
|
||
+ skip := 0
|
||
+ if skip = p.isEmpty(data[i:]); skip == 0 {
|
||
+ return 0
|
||
+ }
|
||
+ i += skip
|
||
+ skip = 0
|
||
+
|
||
+ if i >= len(data) {
|
||
+ return i
|
||
+ }
|
||
+
|
||
+ if p.extensions&LaxHTMLBlocks != 0 {
|
||
+ return i
|
||
+ }
|
||
+ if skip = p.isEmpty(data[i:]); skip == 0 {
|
||
+ // following line must be blank
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ return i + skip
|
||
+}
|
||
+
|
||
+func (*Markdown) isEmpty(data []byte) int {
|
||
+ // it is okay to call isEmpty on an empty buffer
|
||
+ if len(data) == 0 {
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ var i int
|
||
+ for i = 0; i < len(data) && data[i] != '\n'; i++ {
|
||
+ if data[i] != ' ' && data[i] != '\t' {
|
||
+ return 0
|
||
+ }
|
||
+ }
|
||
+ if i < len(data) && data[i] == '\n' {
|
||
+ i++
|
||
+ }
|
||
+ return i
|
||
+}
|
||
+
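+// isHRule reports whether data begins with a horizontal rule line:
+// at least three '*', '-' or '_' characters (all the same), optionally
+// separated by spaces, e.g. "---", "* * *" or "___".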
|
||
+func (*Markdown) isHRule(data []byte) bool {
|
||
+ i := 0
|
||
+
|
||
+ // skip up to three spaces
|
||
+ for i < 3 && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // look at the hrule char
|
||
+ if data[i] != '*' && data[i] != '-' && data[i] != '_' {
|
||
+ return false
|
||
+ }
|
||
+ c := data[i]
|
||
+
|
||
+ // the whole line must be the char or whitespace
|
||
+ n := 0
|
||
+ for i < len(data) && data[i] != '\n' {
|
||
+ switch {
|
||
+ case data[i] == c:
|
||
+ n++
|
||
+ case data[i] != ' ':
|
||
+ return false
|
||
+ }
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ return n >= 3
|
||
+}
|
||
+
|
||
+// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
|
||
+// and returns the end index if so, or 0 otherwise. It also returns the marker found.
|
||
+// If info is not nil, it gets set to the syntax specified in the fence line.
|
||
+func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) {
|
||
+ i, size := 0, 0
|
||
+
|
||
+ // skip up to three spaces
|
||
+ for i < len(data) && i < 3 && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // check for the marker characters: ~ or `
|
||
+ if i >= len(data) {
|
||
+ return 0, ""
|
||
+ }
|
||
+ if data[i] != '~' && data[i] != '`' {
|
||
+ return 0, ""
|
||
+ }
|
||
+
|
||
+ c := data[i]
|
||
+
|
||
+ // the whole line must be the same char or whitespace
|
||
+ for i < len(data) && data[i] == c {
|
||
+ size++
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // the marker char must occur at least 3 times
|
||
+ if size < 3 {
|
||
+ return 0, ""
|
||
+ }
|
||
+ marker = string(data[i-size : i])
|
||
+
|
||
+ // if this is the end marker, it must match the beginning marker
|
||
+ if oldmarker != "" && marker != oldmarker {
|
||
+ return 0, ""
|
||
+ }
|
||
+
|
||
+ // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
|
||
+ // into one, always get the info string, and discard it if the caller doesn't care.
|
||
+ if info != nil {
|
||
+ infoLength := 0
|
||
+ i = skipChar(data, i, ' ')
|
||
+
|
||
+ if i >= len(data) {
|
||
+ if i == len(data) {
|
||
+ return i, marker
|
||
+ }
|
||
+ return 0, ""
|
||
+ }
|
||
+
|
||
+ infoStart := i
|
||
+
|
||
+ if data[i] == '{' {
|
||
+ i++
|
||
+ infoStart++
|
||
+
|
||
+ for i < len(data) && data[i] != '}' && data[i] != '\n' {
|
||
+ infoLength++
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ if i >= len(data) || data[i] != '}' {
|
||
+ return 0, ""
|
||
+ }
|
||
+
|
||
+ // strip all whitespace at the beginning and the end
|
||
+ // of the {} block
|
||
+ for infoLength > 0 && isspace(data[infoStart]) {
|
||
+ infoStart++
|
||
+ infoLength--
|
||
+ }
|
||
+
|
||
+ for infoLength > 0 && isspace(data[infoStart+infoLength-1]) {
|
||
+ infoLength--
|
||
+ }
|
||
+ i++
|
||
+ i = skipChar(data, i, ' ')
|
||
+ } else {
|
||
+ for i < len(data) && !isverticalspace(data[i]) {
|
||
+ infoLength++
|
||
+ i++
|
||
+ }
|
||
+ }
|
||
+
|
||
+ *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength]))
|
||
+ }
|
||
+
|
||
+ if i == len(data) {
|
||
+ return i, marker
|
||
+ }
|
||
+ if i > len(data) || data[i] != '\n' {
|
||
+ return 0, ""
|
||
+ }
|
||
+ return i + 1, marker // Take newline into account.
|
||
+}
|
||
+
|
||
+// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
|
||
+// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
|
||
+// If doRender is true, a final newline is mandatory to recognize the fenced code block.
|
||
+func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int {
|
||
+ var info string
|
||
+ beg, marker := isFenceLine(data, &info, "")
|
||
+ if beg == 0 || beg >= len(data) {
|
||
+ return 0
|
||
+ }
|
||
+ fenceLength := beg - 1
|
||
+
|
||
+ var work bytes.Buffer
|
||
+ work.Write([]byte(info))
|
||
+ work.WriteByte('\n')
|
||
+
|
||
+ for {
|
||
+ // safe to assume beg < len(data)
|
||
+
|
||
+ // check for the end of the code block
|
||
+ fenceEnd, _ := isFenceLine(data[beg:], nil, marker)
|
||
+ if fenceEnd != 0 {
|
||
+ beg += fenceEnd
|
||
+ break
|
||
+ }
|
||
+
|
||
+ // copy the current line
|
||
+ end := skipUntilChar(data, beg, '\n') + 1
|
||
+
|
||
+ // did we reach the end of the buffer without a closing marker?
|
||
+ if end >= len(data) {
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ // verbatim copy to the working buffer
|
||
+ if doRender {
|
||
+ work.Write(data[beg:end])
|
||
+ }
|
||
+ beg = end
|
||
+ }
|
||
+
|
||
+ if doRender {
|
||
+ block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer
|
||
+ block.IsFenced = true
|
||
+ block.FenceLength = fenceLength
|
||
+ finalizeCodeBlock(block)
|
||
+ }
|
||
+
|
||
+ return beg
|
||
+}
|
||
+
|
||
+func unescapeChar(str []byte) []byte {
|
||
+ if str[0] == '\\' {
|
||
+ return []byte{str[1]}
|
||
+ }
|
||
+ return []byte(html.UnescapeString(string(str)))
|
||
+}
|
||
+
|
||
+func unescapeString(str []byte) []byte {
|
||
+ if reBackslashOrAmp.Match(str) {
|
||
+ return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar)
|
||
+ }
|
||
+ return str
|
||
+}
|
||
+
|
||
+func finalizeCodeBlock(block *Node) {
|
||
+ if block.IsFenced {
|
||
+ newlinePos := bytes.IndexByte(block.content, '\n')
|
||
+ firstLine := block.content[:newlinePos]
|
||
+ rest := block.content[newlinePos+1:]
|
||
+ block.Info = unescapeString(bytes.Trim(firstLine, "\n"))
|
||
+ block.Literal = rest
|
||
+ } else {
|
||
+ block.Literal = block.content
|
||
+ }
|
||
+ block.content = nil
|
||
+}
|
||
+
|
||
+func (p *Markdown) table(data []byte) int {
|
||
+ table := p.addBlock(Table, nil)
|
||
+ i, columns := p.tableHeader(data)
|
||
+ if i == 0 {
|
||
+ p.tip = table.Parent
|
||
+ table.Unlink()
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ p.addBlock(TableBody, nil)
|
||
+
|
||
+ for i < len(data) {
|
||
+ pipes, rowStart := 0, i
|
||
+ for ; i < len(data) && data[i] != '\n'; i++ {
|
||
+ if data[i] == '|' {
|
||
+ pipes++
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if pipes == 0 {
|
||
+ i = rowStart
|
||
+ break
|
||
+ }
|
||
+
|
||
+ // include the newline in data sent to tableRow
|
||
+ if i < len(data) && data[i] == '\n' {
|
||
+ i++
|
||
+ }
|
||
+ p.tableRow(data[rowStart:i], columns, false)
|
||
+ }
|
||
+
|
||
+ return i
|
||
+}
|
||
+
|
||
+// check if the specified position is preceded by an odd number of backslashes
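+// (e.g. in `a\|b` the '|' at index 2 is escaped; in `a\\|b` the '|' at
+// index 3 is not, because it is preceded by two backslashes)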
|
||
+func isBackslashEscaped(data []byte, i int) bool {
|
||
+ backslashes := 0
|
||
+ for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
|
||
+ backslashes++
|
||
+ }
|
||
+ return backslashes&1 == 1
|
||
+}
|
||
+
|
||
+func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) {
|
||
+ i := 0
|
||
+ colCount := 1
|
||
+ for i = 0; i < len(data) && data[i] != '\n'; i++ {
|
||
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
|
||
+ colCount++
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // doesn't look like a table header
|
||
+ if colCount == 1 {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ // include the newline in the data sent to tableRow
|
||
+ j := i
|
||
+ if j < len(data) && data[j] == '\n' {
|
||
+ j++
|
||
+ }
|
||
+ header := data[:j]
|
||
+
|
||
+ // column count ignores pipes at beginning or end of line
|
||
+ if data[0] == '|' {
|
||
+ colCount--
|
||
+ }
|
||
+ if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
|
||
+ colCount--
|
||
+ }
|
||
+
|
||
+ columns = make([]CellAlignFlags, colCount)
|
||
+
|
||
+ // move on to the header underline
|
||
+ i++
|
||
+ if i >= len(data) {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
|
||
+ i++
|
||
+ }
|
||
+ i = skipChar(data, i, ' ')
|
||
+
|
||
+ // each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
|
||
+ // and trailing | optional on last column
|
||
+ col := 0
|
||
+ for i < len(data) && data[i] != '\n' {
|
||
+ dashes := 0
|
||
+
|
||
+ if data[i] == ':' {
|
||
+ i++
|
||
+ columns[col] |= TableAlignmentLeft
|
||
+ dashes++
|
||
+ }
|
||
+ for i < len(data) && data[i] == '-' {
|
||
+ i++
|
||
+ dashes++
|
||
+ }
|
||
+ if i < len(data) && data[i] == ':' {
|
||
+ i++
|
||
+ columns[col] |= TableAlignmentRight
|
||
+ dashes++
|
||
+ }
|
||
+ for i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+ if i == len(data) {
|
||
+ return
|
||
+ }
|
||
+ // end of column test is messy
|
||
+ switch {
|
||
+ case dashes < 3:
|
||
+ // not a valid column
|
||
+ return
|
||
+
|
||
+ case data[i] == '|' && !isBackslashEscaped(data, i):
|
||
+ // marker found, now skip past trailing whitespace
|
||
+ col++
|
||
+ i++
|
||
+ for i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // trailing junk found after last column
|
||
+ if col >= colCount && i < len(data) && data[i] != '\n' {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
|
||
+ // something else found where marker was required
|
||
+ return
|
||
+
|
||
+ case data[i] == '\n':
|
||
+ // marker is optional for the last column
|
||
+ col++
|
||
+
|
||
+ default:
|
||
+ // trailing junk found after last column
|
||
+ return
|
||
+ }
|
||
+ }
|
||
+ if col != colCount {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ p.addBlock(TableHead, nil)
|
||
+ p.tableRow(header, columns, true)
|
||
+ size = i
|
||
+ if size < len(data) && data[size] == '\n' {
|
||
+ size++
|
||
+ }
|
||
+ return
|
||
+}
|
||
+
|
||
+func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) {
|
||
+ p.addBlock(TableRow, nil)
|
||
+ i, col := 0, 0
|
||
+
|
||
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ for col = 0; col < len(columns) && i < len(data); col++ {
|
||
+ for i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ cellStart := i
|
||
+
|
||
+ for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ cellEnd := i
|
||
+
|
||
+ // skip the end-of-cell marker, possibly taking us past end of buffer
|
||
+ i++
|
||
+
|
||
+ for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' {
|
||
+ cellEnd--
|
||
+ }
|
||
+
|
||
+ cell := p.addBlock(TableCell, data[cellStart:cellEnd])
|
||
+ cell.IsHeader = header
|
||
+ cell.Align = columns[col]
|
||
+ }
|
||
+
|
||
+ // pad it out with empty columns to get the right number
|
||
+ for ; col < len(columns); col++ {
|
||
+ cell := p.addBlock(TableCell, nil)
|
||
+ cell.IsHeader = header
|
||
+ cell.Align = columns[col]
|
||
+ }
|
||
+
|
||
+ // silently ignore rows with too many cells
|
||
+}
|
||
+
|
||
+// returns blockquote prefix length
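+// (e.g. "> quoted" yields 2, "   >quoted" yields 4, plain text yields 0)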
|
||
+func (p *Markdown) quotePrefix(data []byte) int {
|
||
+ i := 0
|
||
+ for i < 3 && i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+ if i < len(data) && data[i] == '>' {
|
||
+ if i+1 < len(data) && data[i+1] == ' ' {
|
||
+ return i + 2
|
||
+ }
|
||
+ return i + 1
|
||
+ }
|
||
+ return 0
|
||
+}
|
||
+
|
||
+// blockquote ends with at least one blank line
|
||
+// followed by something without a blockquote prefix
|
||
+func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool {
|
||
+ if p.isEmpty(data[beg:]) <= 0 {
|
||
+ return false
|
||
+ }
|
||
+ if end >= len(data) {
|
||
+ return true
|
||
+ }
|
||
+ return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
|
||
+}
|
||
+
|
||
+// parse a blockquote fragment
|
||
+func (p *Markdown) quote(data []byte) int {
|
||
+ block := p.addBlock(BlockQuote, nil)
|
||
+ var raw bytes.Buffer
|
||
+ beg, end := 0, 0
|
||
+ for beg < len(data) {
|
||
+ end = beg
|
||
+ // Step over whole lines, collecting them. While doing that, check for
|
||
+ // fenced code and if one's found, incorporate it altogether,
|
||
+ // regardless of any contents inside it
|
||
+ for end < len(data) && data[end] != '\n' {
|
||
+ if p.extensions&FencedCode != 0 {
|
||
+ if i := p.fencedCodeBlock(data[end:], false); i > 0 {
|
||
+ // -1 to compensate for the extra end++ after the loop:
|
||
+ end += i - 1
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+ end++
|
||
+ }
|
||
+ if end < len(data) && data[end] == '\n' {
|
||
+ end++
|
||
+ }
|
||
+ if pre := p.quotePrefix(data[beg:]); pre > 0 {
|
||
+ // skip the prefix
|
||
+ beg += pre
|
||
+ } else if p.terminateBlockquote(data, beg, end) {
|
||
+ break
|
||
+ }
|
||
+ // this line is part of the blockquote
|
||
+ raw.Write(data[beg:end])
|
||
+ beg = end
|
||
+ }
|
||
+ p.block(raw.Bytes())
|
||
+ p.finalize(block)
|
||
+ return end
|
||
+}
|
||
+
|
||
+// returns prefix length for block code
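+// (one tab or four leading spaces, so "\tfoo" yields 1 and "    foo"
+// yields 4; anything else yields 0)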
|
||
+func (p *Markdown) codePrefix(data []byte) int {
|
||
+ if len(data) >= 1 && data[0] == '\t' {
|
||
+ return 1
|
||
+ }
|
||
+ if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
|
||
+ return 4
|
||
+ }
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (p *Markdown) code(data []byte) int {
|
||
+ var work bytes.Buffer
|
||
+
|
||
+ i := 0
|
||
+ for i < len(data) {
|
||
+ beg := i
|
||
+ for i < len(data) && data[i] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+ if i < len(data) && data[i] == '\n' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ blankline := p.isEmpty(data[beg:i]) > 0
|
||
+ if pre := p.codePrefix(data[beg:i]); pre > 0 {
|
||
+ beg += pre
|
||
+ } else if !blankline {
|
||
+ // non-empty, non-prefixed line breaks the pre
|
||
+ i = beg
|
||
+ break
|
||
+ }
|
||
+
|
||
+ // verbatim copy to the working buffer
|
||
+ if blankline {
|
||
+ work.WriteByte('\n')
|
||
+ } else {
|
||
+ work.Write(data[beg:i])
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // trim all the \n off the end of work
|
||
+ workbytes := work.Bytes()
|
||
+ eol := len(workbytes)
|
||
+ for eol > 0 && workbytes[eol-1] == '\n' {
|
||
+ eol--
|
||
+ }
|
||
+ if eol != len(workbytes) {
|
||
+ work.Truncate(eol)
|
||
+ }
|
||
+
|
||
+ work.WriteByte('\n')
|
||
+
|
||
+ block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer
|
||
+ block.IsFenced = false
|
||
+ finalizeCodeBlock(block)
|
||
+
|
||
+ return i
|
||
+}
|
||
+
|
||
+// returns unordered list item prefix
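+// (e.g. "* item" yields 2 and "  - item" yields 4: up to three leading
+// spaces, the bullet and the space after it)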
|
||
+func (p *Markdown) uliPrefix(data []byte) int {
|
||
+ i := 0
|
||
+ // start with up to 3 spaces
|
||
+ for i < len(data) && i < 3 && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+ if i >= len(data)-1 {
|
||
+ return 0
|
||
+ }
|
||
+ // need one of {'*', '+', '-'} followed by a space or a tab
|
||
+ if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
|
||
+ (data[i+1] != ' ' && data[i+1] != '\t') {
|
||
+ return 0
|
||
+ }
|
||
+ return i + 2
|
||
+}
|
||
+
|
||
+// returns ordered list item prefix
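+// (e.g. "12. item" yields 4: the digits, the dot and the space after it)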
|
||
+func (p *Markdown) oliPrefix(data []byte) int {
|
||
+ i := 0
|
||
+
|
||
+ // start with up to 3 spaces
|
||
+ for i < 3 && i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // count the digits
|
||
+ start := i
|
||
+ for i < len(data) && data[i] >= '0' && data[i] <= '9' {
|
||
+ i++
|
||
+ }
|
||
+ if start == i || i >= len(data)-1 {
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ // we need at least one digit followed by a dot and a space or a tab
|
||
+ if data[i] != '.' || !(data[i+1] == ' ' || data[i+1] == '\t') {
|
||
+ return 0
|
||
+ }
|
||
+ return i + 2
|
||
+}
|
||
+
|
||
+// returns definition list item prefix
|
||
+func (p *Markdown) dliPrefix(data []byte) int {
|
||
+ if len(data) < 2 {
|
||
+ return 0
|
||
+ }
|
||
+ i := 0
|
||
+ // need a ':' followed by a space or a tab
|
||
+ if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') {
|
||
+ return 0
|
||
+ }
|
||
+ for i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+ return i + 2
|
||
+}
|
||
+
|
||
+// parse ordered or unordered list block
|
||
+func (p *Markdown) list(data []byte, flags ListType) int {
|
||
+ i := 0
|
||
+ flags |= ListItemBeginningOfList
|
||
+ block := p.addBlock(List, nil)
|
||
+ block.ListFlags = flags
|
||
+ block.Tight = true
|
||
+
|
||
+ for i < len(data) {
|
||
+ skip := p.listItem(data[i:], &flags)
|
||
+ if flags&ListItemContainsBlock != 0 {
|
||
+ block.ListData.Tight = false
|
||
+ }
|
||
+ i += skip
|
||
+ if skip == 0 || flags&ListItemEndOfList != 0 {
|
||
+ break
|
||
+ }
|
||
+ flags &= ^ListItemBeginningOfList
|
||
+ }
|
||
+
|
||
+ above := block.Parent
|
||
+ finalizeList(block)
|
||
+ p.tip = above
|
||
+ return i
|
||
+}
|
||
+
|
||
+// Returns true if the list item is not the same type as its parent list
|
||
+func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool {
|
||
+ if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 {
|
||
+ return true
|
||
+ } else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 {
|
||
+ return true
|
||
+ } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) {
|
||
+ return true
|
||
+ }
|
||
+ return false
|
||
+}
|
||
+
|
||
+// Returns true if block ends with a blank line, descending if needed
|
||
+// into lists and sublists.
|
||
+func endsWithBlankLine(block *Node) bool {
|
||
+ // TODO: figure this out. Always false now.
|
||
+ for block != nil {
|
||
+ //if block.lastLineBlank {
|
||
+ //return true
|
||
+ //}
|
||
+ t := block.Type
|
||
+ if t == List || t == Item {
|
||
+ block = block.LastChild
|
||
+ } else {
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+ return false
|
||
+}
|
||
+
|
||
+func finalizeList(block *Node) {
|
||
+ block.open = false
|
||
+ item := block.FirstChild
|
||
+ for item != nil {
|
||
+ // check for non-final list item ending with blank line:
|
||
+ if endsWithBlankLine(item) && item.Next != nil {
|
||
+ block.ListData.Tight = false
|
||
+ break
|
||
+ }
|
||
+ // recurse into children of list item, to see if there are spaces
|
||
+ // between any of them:
|
||
+ subItem := item.FirstChild
|
||
+ for subItem != nil {
|
||
+ if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) {
|
||
+ block.ListData.Tight = false
|
||
+ break
|
||
+ }
|
||
+ subItem = subItem.Next
|
||
+ }
|
||
+ item = item.Next
|
||
+ }
|
||
+}
|
||
+
|
||
+// Parse a single list item.
|
||
+// Assumes initial prefix is already removed if this is a sublist.
|
||
+func (p *Markdown) listItem(data []byte, flags *ListType) int {
|
||
+ // keep track of the indentation of the first line
|
||
+ itemIndent := 0
|
||
+ if data[0] == '\t' {
|
||
+ itemIndent += 4
|
||
+ } else {
|
||
+ for itemIndent < 3 && data[itemIndent] == ' ' {
|
||
+ itemIndent++
|
||
+ }
|
||
+ }
|
||
+
|
||
+ var bulletChar byte = '*'
|
||
+ i := p.uliPrefix(data)
|
||
+ if i == 0 {
|
||
+ i = p.oliPrefix(data)
|
||
+ } else {
|
||
+ bulletChar = data[i-2]
|
||
+ }
|
||
+ if i == 0 {
|
||
+ i = p.dliPrefix(data)
|
||
+ // reset definition term flag
|
||
+ if i > 0 {
|
||
+ *flags &= ^ListTypeTerm
|
||
+ }
|
||
+ }
|
||
+ if i == 0 {
|
||
+ // if in definition list, set term flag and continue
|
||
+ if *flags&ListTypeDefinition != 0 {
|
||
+ *flags |= ListTypeTerm
|
||
+ } else {
|
||
+ return 0
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // skip leading whitespace on first line
|
||
+ for i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // find the end of the line
|
||
+ line := i
|
||
+ for i > 0 && i < len(data) && data[i-1] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // get working buffer
|
||
+ var raw bytes.Buffer
|
||
+
|
||
+ // put the first line into the working buffer
|
||
+ raw.Write(data[line:i])
|
||
+ line = i
|
||
+
|
||
+ // process the following lines
|
||
+ containsBlankLine := false
|
||
+ sublist := 0
|
||
+ codeBlockMarker := ""
|
||
+
|
||
+gatherlines:
|
||
+ for line < len(data) {
|
||
+ i++
|
||
+
|
||
+ // find the end of this line
|
||
+ for i < len(data) && data[i-1] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // if it is an empty line, guess that it is part of this item
|
||
+ // and move on to the next line
|
||
+ if p.isEmpty(data[line:i]) > 0 {
|
||
+ containsBlankLine = true
|
||
+ line = i
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // calculate the indentation
|
||
+ indent := 0
|
||
+ indentIndex := 0
|
||
+ if data[line] == '\t' {
|
||
+ indentIndex++
|
||
+ indent += 4
|
||
+ } else {
|
||
+ for indent < 4 && line+indent < i && data[line+indent] == ' ' {
|
||
+ indent++
|
||
+ indentIndex++
|
||
+ }
|
||
+ }
|
||
+
|
||
+ chunk := data[line+indentIndex : i]
|
||
+
|
||
+ if p.extensions&FencedCode != 0 {
|
||
+ // determine if in or out of codeblock
|
||
+ // if in codeblock, ignore normal list processing
|
||
+ _, marker := isFenceLine(chunk, nil, codeBlockMarker)
|
||
+ if marker != "" {
|
||
+ if codeBlockMarker == "" {
|
||
+ // start of codeblock
|
||
+ codeBlockMarker = marker
|
||
+ } else {
|
||
+ // end of codeblock.
|
||
+ codeBlockMarker = ""
|
||
+ }
|
||
+ }
|
||
+ // we are in a codeblock, write line, and continue
|
||
+ if codeBlockMarker != "" || marker != "" {
|
||
+ raw.Write(data[line+indentIndex : i])
|
||
+ line = i
|
||
+ continue gatherlines
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // evaluate how this line fits in
|
||
+ switch {
|
||
+ // is this a nested list item?
|
||
+ case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
|
||
+ p.oliPrefix(chunk) > 0 ||
|
||
+ p.dliPrefix(chunk) > 0:
|
||
+
|
||
+ // to be a nested list, it must be indented more
|
||
+ // if not, it is either a different kind of list
|
||
+ // or the next item in the same list
|
||
+ if indent <= itemIndent {
|
||
+ if p.listTypeChanged(chunk, flags) {
|
||
+ *flags |= ListItemEndOfList
|
||
+ } else if containsBlankLine {
|
||
+ *flags |= ListItemContainsBlock
|
||
+ }
|
||
+
|
||
+ break gatherlines
|
||
+ }
|
||
+
|
||
+ if containsBlankLine {
|
||
+ *flags |= ListItemContainsBlock
|
||
+ }
|
||
+
|
||
+ // is this the first item in the nested list?
|
||
+ if sublist == 0 {
|
||
+ sublist = raw.Len()
|
||
+ }
|
||
+
|
||
+ // is this a nested prefix heading?
|
||
+ case p.isPrefixHeading(chunk):
|
||
+ // if the heading is not indented, it is not nested in the list
|
||
+ // and thus ends the list
|
||
+ if containsBlankLine && indent < 4 {
|
||
+ *flags |= ListItemEndOfList
|
||
+ break gatherlines
|
||
+ }
|
||
+ *flags |= ListItemContainsBlock
|
||
+
|
||
+ // anything following an empty line is only part
|
||
+ // of this item if it is indented 4 spaces
|
||
+ // (regardless of the indentation of the beginning of the item)
|
||
+ case containsBlankLine && indent < 4:
|
||
+ if *flags&ListTypeDefinition != 0 && i < len(data)-1 {
|
||
+ // is the next item still a part of this list?
|
||
+ next := i
|
||
+ for next < len(data) && data[next] != '\n' {
|
||
+ next++
|
||
+ }
|
||
+ for next < len(data)-1 && data[next] == '\n' {
|
||
+ next++
|
||
+ }
|
||
+ if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
|
||
+ *flags |= ListItemEndOfList
|
||
+ }
|
||
+ } else {
|
||
+ *flags |= ListItemEndOfList
|
||
+ }
|
||
+ break gatherlines
|
||
+
|
||
+ // a blank line means this should be parsed as a block
|
||
+ case containsBlankLine:
|
||
+ raw.WriteByte('\n')
|
||
+ *flags |= ListItemContainsBlock
|
||
+ }
|
||
+
|
||
+ // if this line was preceded by one or more blanks,
|
||
+ // re-introduce the blank into the buffer
|
||
+ if containsBlankLine {
|
||
+ containsBlankLine = false
|
||
+ raw.WriteByte('\n')
|
||
+ }
|
||
+
|
||
+ // add the line into the working buffer without prefix
|
||
+ raw.Write(data[line+indentIndex : i])
|
||
+
|
||
+ line = i
|
||
+ }
|
||
+
|
||
+ rawBytes := raw.Bytes()
|
||
+
|
||
+ block := p.addBlock(Item, nil)
|
||
+ block.ListFlags = *flags
|
||
+ block.Tight = false
|
||
+ block.BulletChar = bulletChar
|
||
+ block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark
|
||
+
|
||
+ // render the contents of the list item
|
||
+ if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 {
|
||
+ // intermediate render of block item, except for definition term
|
||
+ if sublist > 0 {
|
||
+ p.block(rawBytes[:sublist])
|
||
+ p.block(rawBytes[sublist:])
|
||
+ } else {
|
||
+ p.block(rawBytes)
|
||
+ }
|
||
+ } else {
|
||
+ // intermediate render of inline item
|
||
+ if sublist > 0 {
|
||
+ child := p.addChild(Paragraph, 0)
|
||
+ child.content = rawBytes[:sublist]
|
||
+ p.block(rawBytes[sublist:])
|
||
+ } else {
|
||
+ child := p.addChild(Paragraph, 0)
|
||
+ child.content = rawBytes
|
||
+ }
|
||
+ }
|
||
+ return line
|
||
+}
|
||
+
|
||
+// render a single paragraph that has already been parsed out
|
||
+func (p *Markdown) renderParagraph(data []byte) {
|
||
+ if len(data) == 0 {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ // trim leading spaces
|
||
+ beg := 0
|
||
+ for data[beg] == ' ' {
|
||
+ beg++
|
||
+ }
|
||
+
|
||
+ end := len(data)
|
||
+ // trim trailing newline
|
||
+ if data[len(data)-1] == '\n' {
|
||
+ end--
|
||
+ }
|
||
+
|
||
+ // trim trailing spaces
|
||
+ for end > beg && data[end-1] == ' ' {
|
||
+ end--
|
||
+ }
|
||
+
|
||
+ p.addBlock(Paragraph, data[beg:end])
|
||
+}
|
||
+
|
||
+func (p *Markdown) paragraph(data []byte) int {
|
||
+ // prev: index of 1st char of previous line
|
||
+ // line: index of 1st char of current line
|
||
+ // i: index of cursor/end of current line
|
||
+ var prev, line, i int
|
||
+ tabSize := TabSizeDefault
|
||
+ if p.extensions&TabSizeEight != 0 {
|
||
+ tabSize = TabSizeDouble
|
||
+ }
|
||
+ // keep going until we find something to mark the end of the paragraph
|
||
+ for i < len(data) {
|
||
+ // mark the beginning of the current line
|
||
+ prev = line
|
||
+ current := data[i:]
|
||
+ line = i
|
||
+
|
||
+ // did we find a reference or a footnote? If so, end a paragraph
|
||
+ // preceding it and report that we have consumed up to the end of that
|
||
+ // reference:
|
||
+ if refEnd := isReference(p, current, tabSize); refEnd > 0 {
|
||
+ p.renderParagraph(data[:i])
|
||
+ return i + refEnd
|
||
+ }
|
||
+
|
||
+ // did we find a blank line marking the end of the paragraph?
|
||
+ if n := p.isEmpty(current); n > 0 {
|
||
+ // is this blank line followed by a definition list item?
|
||
+ if p.extensions&DefinitionLists != 0 {
|
||
+ if i < len(data)-1 && data[i+1] == ':' {
|
||
+ return p.list(data[prev:], ListTypeDefinition)
|
||
+ }
|
||
+ }
|
||
+
|
||
+ p.renderParagraph(data[:i])
|
||
+ return i + n
|
||
+ }
|
||
+
|
||
+ // an underline under some text marks a heading, so our paragraph ended on prev line
|
||
+ if i > 0 {
|
||
+ if level := p.isUnderlinedHeading(current); level > 0 {
|
||
+ // render the paragraph
|
||
+ p.renderParagraph(data[:prev])
|
||
+
|
||
+ // ignore leading and trailing whitespace
|
||
+ eol := i - 1
|
||
+ for prev < eol && data[prev] == ' ' {
|
||
+ prev++
|
||
+ }
|
||
+ for eol > prev && data[eol-1] == ' ' {
|
||
+ eol--
|
||
+ }
|
||
+
|
||
+ id := ""
|
||
+ if p.extensions&AutoHeadingIDs != 0 {
|
||
+ id = SanitizedAnchorName(string(data[prev:eol]))
|
||
+ }
|
||
+
|
||
+ block := p.addBlock(Heading, data[prev:eol])
|
||
+ block.Level = level
|
||
+ block.HeadingID = id
|
||
+
|
||
+ // find the end of the underline
|
||
+ for i < len(data) && data[i] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+ return i
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // if the next line starts a block of HTML, then the paragraph ends here
|
||
+ if p.extensions&LaxHTMLBlocks != 0 {
|
||
+ if data[i] == '<' && p.html(current, false) > 0 {
|
||
+ // rewind to before the HTML block
|
||
+ p.renderParagraph(data[:i])
|
||
+ return i
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // if there's a prefixed heading or a horizontal rule after this, paragraph is over
|
||
+ if p.isPrefixHeading(current) || p.isHRule(current) {
|
||
+ p.renderParagraph(data[:i])
|
||
+ return i
|
||
+ }
|
||
+
|
||
+ // if there's a fenced code block, paragraph is over
|
||
+ if p.extensions&FencedCode != 0 {
|
||
+ if p.fencedCodeBlock(current, false) > 0 {
|
||
+ p.renderParagraph(data[:i])
|
||
+ return i
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // if there's a definition list item, prev line is a definition term
|
||
+ if p.extensions&DefinitionLists != 0 {
|
||
+ if p.dliPrefix(current) != 0 {
|
||
+ ret := p.list(data[prev:], ListTypeDefinition)
|
||
+ return ret
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // if there's a list after this, paragraph is over
|
||
+ if p.extensions&NoEmptyLineBeforeBlock != 0 {
|
||
+ if p.uliPrefix(current) != 0 ||
|
||
+ p.oliPrefix(current) != 0 ||
|
||
+ p.quotePrefix(current) != 0 ||
|
||
+ p.codePrefix(current) != 0 {
|
||
+ p.renderParagraph(data[:i])
|
||
+ return i
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // otherwise, scan to the beginning of the next line
|
||
+ nl := bytes.IndexByte(data[i:], '\n')
|
||
+ if nl >= 0 {
|
||
+ i += nl + 1
|
||
+ } else {
|
||
+ i += len(data[i:])
|
||
+ }
|
||
+ }
|
||
+
|
||
+ p.renderParagraph(data[:i])
|
||
+ return i
|
||
+}
|
||
+
|
||
+func skipChar(data []byte, start int, char byte) int {
|
||
+ i := start
|
||
+ for i < len(data) && data[i] == char {
|
||
+ i++
|
||
+ }
|
||
+ return i
|
||
+}
|
||
+
|
||
+func skipUntilChar(text []byte, start int, char byte) int {
|
||
+ i := start
|
||
+ for i < len(text) && text[i] != char {
|
||
+ i++
|
||
+ }
|
||
+ return i
|
||
+}
|
||
+
|
||
+// SanitizedAnchorName returns a sanitized anchor name for the given text.
|
||
+//
|
||
+// It implements the algorithm specified in the package comment.
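+//
+// For example, SanitizedAnchorName("This is a header") returns
+// "this-is-a-header".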
|
||
+func SanitizedAnchorName(text string) string {
|
||
+ var anchorName []rune
|
||
+ futureDash := false
|
||
+ for _, r := range text {
|
||
+ switch {
|
||
+ case unicode.IsLetter(r) || unicode.IsNumber(r):
|
||
+ if futureDash && len(anchorName) > 0 {
|
||
+ anchorName = append(anchorName, '-')
|
||
+ }
|
||
+ futureDash = false
|
||
+ anchorName = append(anchorName, unicode.ToLower(r))
|
||
+ default:
|
||
+ futureDash = true
|
||
+ }
|
||
+ }
|
||
+ return string(anchorName)
|
||
+}
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go
new file mode 100644
index 000000000000..57ff152a0568
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/doc.go
@@ -0,0 +1,46 @@
+// Package blackfriday is a markdown processor.
+//
+// It translates plain text with simple formatting rules into an AST, which can
+// then be further processed to HTML (provided by Blackfriday itself) or other
+// formats (provided by the community).
+//
+// The simplest way to invoke Blackfriday is to call the Run function. It will
+// take a text input and produce a text output in HTML (or other format).
+//
+// A slightly more sophisticated way to use Blackfriday is to create a Markdown
+// processor and to call Parse, which returns a syntax tree for the input
+// document. You can leverage Blackfriday's parsing for content extraction from
+// markdown documents. You can assign a custom renderer and set various options
+// to the Markdown processor.
+//
+// If you're interested in calling Blackfriday from the command line, see
+// https://github.com/russross/blackfriday-tool.
+//
+// Sanitized Anchor Names
+//
+// Blackfriday includes an algorithm for creating sanitized anchor names
+// corresponding to a given input text. This algorithm is used to create
+// anchors for headings when the AutoHeadingIDs extension is enabled. The
+// algorithm is specified below, so that other packages can create
+// compatible anchor names and links to those anchors.
+//
+// The algorithm iterates over the input text, interpreted as UTF-8,
+// one Unicode code point (rune) at a time. All runes that are letters (category L)
+// or numbers (category N) are considered valid characters. They are mapped to
+// lower case, and included in the output. All other runes are considered
+// invalid characters. Invalid characters that precede the first valid character,
+// as well as invalid characters that follow the last valid character,
+// are dropped completely. All other sequences of invalid characters
+// between two valid characters are replaced with a single dash character '-'.
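+//
+// For example, the input "One. Two, & Three!" yields the anchor name
+// "one-two-three".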
+//
+// SanitizedAnchorName exposes this functionality, and can be used to
+// create compatible links to the anchor names generated by blackfriday.
+// This algorithm is also implemented in a small standalone package at
+// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
+// that want a small package and don't need the full functionality of blackfriday.
+package blackfriday
+
+// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
+// github.com/shurcooL/sanitized_anchor_name.
+// Otherwise, users of sanitized_anchor_name will get anchor names
+// that are incompatible with those generated by blackfriday.
diff --git a/vendor/github.com/russross/blackfriday/v2/entities.go b/vendor/github.com/russross/blackfriday/v2/entities.go
new file mode 100644
index 000000000000..a2c3edb691c8
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/entities.go
@@ -0,0 +1,2236 @@
+package blackfriday
|
||
+
|
||
+// Extracted from https://html.spec.whatwg.org/multipage/entities.json
|
||
+var entities = map[string]bool{
|
||
+ "Æ": true,
|
||
+ "Æ": true,
|
||
+ "&": true,
|
||
+ "&": true,
|
||
+ "Á": true,
|
||
+ "Á": true,
|
||
+ "Ă": true,
|
||
+ "Â": true,
|
||
+ "Â": true,
|
||
+ "А": true,
|
||
+ "𝔄": true,
|
||
+ "À": true,
|
||
+ "À": true,
|
||
+ "Α": true,
|
||
+ "Ā": true,
|
||
+ "⩓": true,
|
||
+ "Ą": true,
|
||
+ "𝔸": true,
|
||
+ "⁡": true,
|
||
+ "Å": true,
|
||
+ "Å": true,
|
||
+ "𝒜": true,
|
||
+ "≔": true,
|
||
+ "Ã": true,
|
||
+ "Ã": true,
|
||
+ "Ä": true,
|
||
+ "Ä": true,
|
||
+ "∖": true,
|
||
+ "⫧": true,
|
||
+ "⌆": true,
|
||
+ "Б": true,
|
||
+ "∵": true,
|
||
+ "ℬ": true,
|
||
+ "Β": true,
|
||
+ "𝔅": true,
|
||
+ "𝔹": true,
|
||
+ "˘": true,
|
||
+ "ℬ": true,
|
||
+ "≎": true,
|
||
+ "Ч": true,
|
||
+ "©": true,
|
||
+ "©": true,
|
||
+ "Ć": true,
|
||
+ "⋒": true,
|
||
+ "ⅅ": true,
|
||
+ "ℭ": true,
|
||
+ "Č": true,
|
||
+ "Ç": true,
|
||
+ "Ç": true,
|
||
+ "Ĉ": true,
|
||
+ "∰": true,
|
||
+ "Ċ": true,
|
||
+ "¸": true,
|
||
+ "·": true,
|
||
+ "ℭ": true,
|
||
+ "Χ": true,
|
||
+ "⊙": true,
|
||
+ "⊖": true,
|
||
+ "⊕": true,
|
||
+ "⊗": true,
|
||
+ "∲": true,
|
||
+ "”": true,
|
||
+ "’": true,
|
||
+ "∷": true,
|
||
+ "⩴": true,
|
||
+ "≡": true,
|
||
+ "∯": true,
|
||
+ "∮": true,
|
||
+ "ℂ": true,
|
||
+ "∐": true,
|
||
+ "∳": true,
|
||
+ "⨯": true,
|
||
+ "𝒞": true,
|
||
+ "⋓": true,
|
||
+ "≍": true,
|
||
+ "ⅅ": true,
|
||
+ "⤑": true,
|
||
+ "Ђ": true,
|
||
+ "Ѕ": true,
|
||
+ "Џ": true,
|
||
+ "‡": true,
|
||
+ "↡": true,
|
||
+ "⫤": true,
|
||
+ "Ď": true,
|
||
+ "Д": true,
|
||
+ "∇": true,
|
||
+ "Δ": true,
|
||
+ "𝔇": true,
|
||
+ "´": true,
|
||
+ "˙": true,
|
||
+ "˝": true,
|
||
+ "`": true,
|
||
+ "˜": true,
|
||
+ "⋄": true,
|
||
+ "ⅆ": true,
|
||
+ "𝔻": true,
|
||
+ "¨": true,
|
||
+ "⃜": true,
|
||
+ "≐": true,
|
||
+ "∯": true,
|
||
+ "¨": true,
|
||
+ "⇓": true,
|
||
+ "⇐": true,
|
||
+ "⇔": true,
|
||
+ "⫤": true,
|
||
+ "⟸": true,
|
||
+ "⟺": true,
|
||
+ "⟹": true,
|
||
+ "⇒": true,
|
||
+ "⊨": true,
|
||
+ "⇑": true,
|
||
+ "⇕": true,
|
||
+ "∥": true,
|
||
+ "↓": true,
|
||
+ "⤓": true,
|
||
+ "⇵": true,
|
||
+ "̑": true,
|
||
+ "⥐": true,
|
||
+ "⥞": true,
|
||
+ "↽": true,
|
||
+ "⥖": true,
|
||
+ "⥟": true,
|
||
+ "⇁": true,
|
||
+ "⥗": true,
|
||
+ "⊤": true,
|
||
+ "↧": true,
|
||
+ "⇓": true,
|
||
+ "𝒟": true,
|
||
+ "Đ": true,
|
||
+ "Ŋ": true,
|
||
+ "Ð": true,
|
||
+ "Ð": true,
|
||
+ "É": true,
|
||
+ "É": true,
|
||
+ "Ě": true,
|
||
+ "Ê": true,
|
||
+ "Ê": true,
|
||
+ "Э": true,
|
||
+ "Ė": true,
|
||
+ "𝔈": true,
|
||
+ "È": true,
|
||
+ "È": true,
|
||
+ "∈": true,
|
||
+ "Ē": true,
|
||
+ "◻": true,
|
||
+ "▫": true,
|
||
+ "Ę": true,
|
||
+ "𝔼": true,
|
||
+ "Ε": true,
|
||
+ "⩵": true,
|
||
+ "≂": true,
|
||
+ "⇌": true,
|
||
+ "ℰ": true,
|
||
+ "⩳": true,
|
||
+ "Η": true,
|
||
+ "Ë": true,
|
||
+ "Ë": true,
|
||
+ "∃": true,
|
||
+ "ⅇ": true,
|
||
+ "Ф": true,
|
||
+ "𝔉": true,
|
||
+ "◼": true,
|
||
+ "▪": true,
|
||
+ "𝔽": true,
|
||
+ "∀": true,
|
||
+ "ℱ": true,
|
||
+ "ℱ": true,
|
||
+ "Ѓ": true,
|
||
+ ">": true,
|
||
+ ">": true,
|
||
+ "Γ": true,
|
||
+ "Ϝ": true,
|
||
+ "Ğ": true,
|
||
+ "Ģ": true,
|
||
+ "Ĝ": true,
|
||
+ "Г": true,
|
||
+ "Ġ": true,
|
||
+ "𝔊": true,
|
||
+ "⋙": true,
|
||
+ "𝔾": true,
|
||
+ "≥": true,
|
||
+ "⋛": true,
|
||
+ "≧": true,
|
||
+ "⪢": true,
|
||
+ "≷": true,
|
||
+ "⩾": true,
|
||
+ "≳": true,
|
||
+ "𝒢": true,
|
||
+ "≫": true,
|
||
+ "Ъ": true,
|
||
+ "ˇ": true,
|
||
+ "^": true,
|
||
+ "Ĥ": true,
|
||
+ "ℌ": true,
|
||
+ "ℋ": true,
|
||
+ "ℍ": true,
|
||
+ "─": true,
|
||
+ "ℋ": true,
|
||
+ "Ħ": true,
|
||
+ "≎": true,
|
||
+ "≏": true,
|
||
+ "Е": true,
|
||
+ "IJ": true,
|
||
+ "Ё": true,
|
||
+ "Í": true,
|
||
+ "Í": true,
|
||
+ "Î": true,
|
||
+ "Î": true,
|
||
+ "И": true,
|
||
+ "İ": true,
|
||
+ "ℑ": true,
|
||
+ "Ì": true,
|
||
+ "Ì": true,
|
||
+ "ℑ": true,
|
||
+ "Ī": true,
|
||
+ "ⅈ": true,
|
||
+ "⇒": true,
|
||
+ "∬": true,
|
||
+ "∫": true,
|
||
+ "⋂": true,
|
||
+ "⁣": true,
|
||
+ "⁢": true,
|
||
+ "Į": true,
|
||
+ "𝕀": true,
|
||
+ "Ι": true,
|
||
+ "ℐ": true,
|
||
+ "Ĩ": true,
|
||
+ "І": true,
|
||
+ "Ï": true,
|
||
+ "Ï": true,
|
||
+ "Ĵ": true,
|
||
+ "Й": true,
|
||
+ "𝔍": true,
|
||
+ "𝕁": true,
|
||
+ "𝒥": true,
|
||
+ "Ј": true,
|
||
+ "Є": true,
|
||
+ "Х": true,
|
||
+ "Ќ": true,
|
||
+ "Κ": true,
|
||
+ "Ķ": true,
|
||
+ "К": true,
|
||
+ "𝔎": true,
|
||
+ "𝕂": true,
|
||
+ "𝒦": true,
|
||
+ "Љ": true,
|
||
+ "<": true,
|
||
+ "<": true,
|
||
+ "Ĺ": true,
|
||
+ "Λ": true,
|
||
+ "⟪": true,
|
||
+ "ℒ": true,
|
||
+ "↞": true,
|
||
+ "Ľ": true,
|
||
+ "Ļ": true,
|
||
+ "Л": true,
|
||
+ "⟨": true,
|
||
+ "←": true,
|
||
+ "⇤": true,
|
||
+ "⇆": true,
|
||
+ "⌈": true,
|
||
+ "⟦": true,
|
||
+ "⥡": true,
|
||
+ "⇃": true,
|
||
+ "⥙": true,
|
||
+ "⌊": true,
|
||
+ "↔": true,
|
||
+ "⥎": true,
|
||
+ "⊣": true,
|
||
+ "↤": true,
|
||
+ "⥚": true,
|
||
+ "⊲": true,
|
||
+ "⧏": true,
|
||
+ "⊴": true,
|
||
+ "⥑": true,
|
||
+ "⥠": true,
|
||
+ "↿": true,
|
||
+ "⥘": true,
|
||
+ "↼": true,
|
||
+ "⥒": true,
|
||
+ "⇐": true,
|
||
+ "⇔": true,
|
||
+ "⋚": true,
|
||
+ "≦": true,
|
||
+ "≶": true,
|
||
+ "⪡": true,
|
||
+ "⩽": true,
|
||
+ "≲": true,
|
||
+ "𝔏": true,
|
||
+ "⋘": true,
|
||
+ "⇚": true,
|
||
+ "Ŀ": true,
|
||
+ "⟵": true,
|
||
+ "⟷": true,
|
||
+ "⟶": true,
|
||
+ "⟸": true,
|
||
+ "⟺": true,
|
||
+ "⟹": true,
|
||
+ "𝕃": true,
|
||
+ "↙": true,
|
||
+ "↘": true,
|
||
+ "ℒ": true,
|
||
+ "↰": true,
|
||
+ "Ł": true,
|
||
+ "≪": true,
|
||
+ "⤅": true,
|
||
+ "М": true,
|
||
+ " ": true,
|
||
+ "ℳ": true,
|
||
+ "𝔐": true,
|
||
+ "∓": true,
|
||
+ "𝕄": true,
|
||
+ "ℳ": true,
|
||
+ "Μ": true,
|
||
+ "Њ": true,
|
||
+ "Ń": true,
|
||
+ "Ň": true,
|
||
+ "Ņ": true,
|
||
+ "Н": true,
|
||
+ "​": true,
|
||
+ "​": true,
|
||
+ "​": true,
|
||
+ "​": true,
|
||
+ "≫": true,
|
||
+ "≪": true,
|
||
+ "
": true,
|
||
+ "𝔑": true,
|
||
+ "⁠": true,
|
||
+ " ": true,
|
||
+ "ℕ": true,
|
||
+ "⫬": true,
|
||
+ "≢": true,
|
||
+ "≭": true,
|
||
+ "∦": true,
|
||
+ "∉": true,
|
||
+ "≠": true,
|
||
+ "≂̸": true,
|
||
+ "∄": true,
|
||
+ "≯": true,
|
||
+ "≱": true,
|
||
+ "≧̸": true,
|
||
+ "≫̸": true,
|
||
+ "≹": true,
|
||
+ "⩾̸": true,
|
||
+ "≵": true,
|
||
+ "≎̸": true,
|
||
+ "≏̸": true,
|
||
+ "⋪": true,
|
||
+ "⧏̸": true,
|
||
+ "⋬": true,
|
||
+ "≮": true,
|
||
+ "≰": true,
|
||
+ "≸": true,
|
||
+ "≪̸": true,
|
||
+ "⩽̸": true,
|
||
+ "≴": true,
|
||
+ "⪢̸": true,
|
||
+ "⪡̸": true,
|
||
+ "⊀": true,
|
||
+ "⪯̸": true,
|
||
+ "⋠": true,
|
||
+ "∌": true,
|
||
+ "⋫": true,
|
||
+ "⧐̸": true,
|
||
+ "⋭": true,
|
||
+ "⊏̸": true,
|
||
+ "⋢": true,
|
||
+ "⊐̸": true,
|
||
+ "⋣": true,
|
||
+ "⊂⃒": true,
|
||
+ "⊈": true,
|
||
+ "⊁": true,
|
||
+ "⪰̸": true,
|
||
+ "⋡": true,
|
||
+ "≿̸": true,
|
||
+ "⊃⃒": true,
|
||
+ "⊉": true,
|
||
+ "≁": true,
|
||
+ "≄": true,
|
||
+ "≇": true,
|
||
+ "≉": true,
|
||
+ "∤": true,
|
||
+ "𝒩": true,
|
||
+ "Ñ": true,
|
||
+ "Ñ": true,
|
||
+ "Ν": true,
|
||
+ "Œ": true,
|
||
+ "Ó": true,
|
||
+ "Ó": true,
|
||
+ "Ô": true,
|
||
+ "Ô": true,
|
||
+ "О": true,
|
||
+ "Ő": true,
|
||
+ "𝔒": true,
|
||
+ "Ò": true,
|
||
+ "Ò": true,
|
||
+ "Ō": true,
|
||
+ "Ω": true,
|
||
+ "Ο": true,
|
||
+ "𝕆": true,
|
||
+ "“": true,
|
||
+ "‘": true,
|
||
+ "⩔": true,
|
||
+ "𝒪": true,
|
||
+ "Ø": true,
|
||
+ "Ø": true,
|
||
+ "Õ": true,
|
||
+ "Õ": true,
|
||
+ "⨷": true,
|
||
+ "Ö": true,
|
||
+ "Ö": true,
|
||
+ "‾": true,
|
||
+ "⏞": true,
|
||
+ "⎴": true,
|
||
+ "⏜": true,
|
||
+ "∂": true,
|
||
+ "П": true,
|
||
+ "𝔓": true,
|
||
+ "Φ": true,
|
||
+ "Π": true,
|
||
+ "±": true,
|
||
+ "ℌ": true,
|
||
+ "ℙ": true,
|
||
+ "⪻": true,
|
||
+ "≺": true,
|
||
+ "⪯": true,
|
||
+ "≼": true,
|
||
+ "≾": true,
|
||
+ "″": true,
|
||
+ "∏": true,
|
||
+ "∷": true,
|
||
+ "∝": true,
|
||
+ "𝒫": true,
|
||
+ "Ψ": true,
|
||
+ """: true,
|
||
+ """: true,
|
||
+ "𝔔": true,
|
||
+ "ℚ": true,
|
||
+ "𝒬": true,
|
||
+ "⤐": true,
|
||
+ "®": true,
|
||
+ "®": true,
|
||
+ "Ŕ": true,
|
||
+ "⟫": true,
|
||
+ "↠": true,
|
||
+ "⤖": true,
|
||
+ "Ř": true,
|
||
+ "Ŗ": true,
|
||
+ "Р": true,
|
||
+ "ℜ": true,
|
||
+ "∋": true,
|
||
+ "⇋": true,
|
||
+ "⥯": true,
|
||
+ "ℜ": true,
|
||
+ "Ρ": true,
|
||
+ "⟩": true,
|
||
+ "→": true,
|
||
+ "⇥": true,
|
||
+ "⇄": true,
|
||
+ "⌉": true,
|
||
+ "⟧": true,
|
||
+ "⥝": true,
|
||
+ "⇂": true,
|
||
+ "⥕": true,
|
||
+ "⌋": true,
|
||
+ "⊢": true,
|
||
+ "↦": true,
|
||
+ "⥛": true,
|
||
+ "⊳": true,
|
||
+ "⧐": true,
|
||
+ "⊵": true,
|
||
+ "⥏": true,
|
||
+ "⥜": true,
|
||
+ "↾": true,
|
||
+ "⥔": true,
|
||
+ "⇀": true,
|
||
+ "⥓": true,
|
||
+ "⇒": true,
|
||
+ "ℝ": true,
|
||
+ "⥰": true,
|
||
+ "⇛": true,
|
||
+ "ℛ": true,
|
||
+ "↱": true,
|
||
+ "⧴": true,
|
||
+ "Щ": true,
|
||
+ "Ш": true,
|
||
+ "Ь": true,
|
||
+ "Ś": true,
|
||
+ "⪼": true,
|
||
+ "Š": true,
|
||
+ "Ş": true,
|
||
+ "Ŝ": true,
|
||
+ "С": true,
|
||
+ "𝔖": true,
|
||
+ "↓": true,
|
||
+ "←": true,
|
||
+ "→": true,
|
||
+ "↑": true,
|
||
+ "Σ": true,
|
||
+ "∘": true,
|
||
+ "𝕊": true,
|
||
+ "√": true,
|
||
+ "□": true,
|
||
+ "⊓": true,
|
||
+ "⊏": true,
|
||
+ "⊑": true,
|
||
+ "⊐": true,
|
||
+ "⊒": true,
|
||
+ "⊔": true,
|
||
+ "𝒮": true,
|
||
+ "⋆": true,
|
||
+ "⋐": true,
|
||
+ "⋐": true,
|
||
+ "⊆": true,
|
||
+ "≻": true,
|
||
+ "⪰": true,
|
||
+ "≽": true,
|
||
+ "≿": true,
|
||
+ "∋": true,
|
||
+ "∑": true,
|
||
+ "⋑": true,
|
||
+ "⊃": true,
|
||
+ "⊇": true,
|
||
+ "⋑": true,
|
||
+ "Þ": true,
|
||
+ "Þ": true,
|
||
+ "™": true,
|
||
+ "Ћ": true,
|
||
+ "Ц": true,
|
||
+ "	": true,
|
||
+ "Τ": true,
|
||
+ "Ť": true,
|
||
+ "Ţ": true,
|
||
+ "Т": true,
|
||
+ "𝔗": true,
|
||
+ "∴": true,
|
||
+ "Θ": true,
|
||
+ "  ": true,
|
||
+ " ": true,
|
||
+ "∼": true,
|
||
+ "≃": true,
|
||
+ "≅": true,
|
||
+ "≈": true,
|
||
+ "𝕋": true,
|
||
+ "⃛": true,
|
||
+ "𝒯": true,
|
||
+ "Ŧ": true,
|
||
+ "Ú": true,
|
||
+ "Ú": true,
|
||
+ "↟": true,
|
||
+ "⥉": true,
|
||
+ "Ў": true,
|
||
+ "Ŭ": true,
|
||
+ "Û": true,
|
||
+ "Û": true,
|
||
+ "У": true,
|
||
+ "Ű": true,
|
||
+ "𝔘": true,
|
||
+ "Ù": true,
|
||
+ "Ù": true,
|
||
+ "Ū": true,
|
||
+ "_": true,
|
||
+ "⏟": true,
|
||
+ "⎵": true,
|
||
+ "⏝": true,
|
||
+ "⋃": true,
|
||
+ "⊎": true,
|
||
+ "Ų": true,
|
||
+ "𝕌": true,
|
||
+ "↑": true,
|
||
+ "⤒": true,
|
||
+ "⇅": true,
|
||
+ "↕": true,
|
||
+ "⥮": true,
|
||
+ "⊥": true,
|
||
+ "↥": true,
|
||
+ "⇑": true,
|
||
+ "⇕": true,
|
||
+ "↖": true,
|
||
+ "↗": true,
|
||
+ "ϒ": true,
|
||
+ "Υ": true,
|
||
+ "Ů": true,
|
||
+ "𝒰": true,
|
||
+ "Ũ": true,
|
||
+ "Ü": true,
|
||
+ "Ü": true,
|
||
+ "⊫": true,
|
||
+ "⫫": true,
|
||
+ "В": true,
|
||
+ "⊩": true,
|
||
+ "⫦": true,
|
||
+ "⋁": true,
|
||
+ "‖": true,
|
||
+ "‖": true,
|
||
+ "∣": true,
|
||
+ "|": true,
|
||
+ "❘": true,
|
||
+ "≀": true,
|
||
+ " ": true,
|
||
+ "𝔙": true,
|
||
+ "𝕍": true,
|
||
+ "𝒱": true,
|
||
+ "⊪": true,
|
||
+ "Ŵ": true,
|
||
+ "⋀": true,
|
||
+ "𝔚": true,
|
||
+ "𝕎": true,
|
||
+ "𝒲": true,
|
||
+ "𝔛": true,
|
||
+ "Ξ": true,
|
||
+ "𝕏": true,
|
||
+ "𝒳": true,
|
||
+ "Я": true,
|
||
+ "Ї": true,
|
||
+ "Ю": true,
|
||
+ "Ý": true,
|
||
+ "Ý": true,
|
||
+ "Ŷ": true,
|
||
+ "Ы": true,
|
||
+ "𝔜": true,
|
||
+ "𝕐": true,
|
||
+ "𝒴": true,
|
||
+ "Ÿ": true,
|
||
+ "Ж": true,
|
||
+ "Ź": true,
|
||
+ "Ž": true,
|
||
+ "З": true,
|
||
+ "Ż": true,
|
||
+ "​": true,
|
||
+ "Ζ": true,
|
||
+ "ℨ": true,
|
||
+ "ℤ": true,
|
||
+ "𝒵": true,
|
||
+ "á": true,
|
||
+ "á": true,
|
||
+ "ă": true,
|
||
+ "∾": true,
|
||
+ "∾̳": true,
|
||
+ "∿": true,
|
||
+ "â": true,
|
||
+ "â": true,
|
||
+ "´": true,
|
||
+ "´": true,
|
||
+ "а": true,
|
||
+ "æ": true,
|
||
+ "æ": true,
|
||
+ "⁡": true,
|
||
+ "𝔞": true,
|
||
+ "à": true,
|
||
+ "à": true,
|
||
+ "ℵ": true,
|
||
+ "ℵ": true,
|
||
+ "α": true,
|
||
+ "ā": true,
|
||
+ "⨿": true,
|
||
+ "&": true,
|
||
+ "&": true,
|
||
+ "∧": true,
|
||
+ "⩕": true,
|
||
+ "⩜": true,
|
||
+ "⩘": true,
|
||
+ "⩚": true,
|
||
+ "∠": true,
|
||
+ "⦤": true,
|
||
+ "∠": true,
|
||
+ "∡": true,
|
||
+ "⦨": true,
|
||
+ "⦩": true,
|
||
+ "⦪": true,
|
||
+ "⦫": true,
|
||
+ "⦬": true,
|
||
+ "⦭": true,
|
||
+ "⦮": true,
|
||
+ "⦯": true,
|
||
+ "∟": true,
|
||
+ "⊾": true,
|
||
+ "⦝": true,
|
||
+ "∢": true,
|
||
+ "Å": true,
|
||
+ "⍼": true,
|
||
+ "ą": true,
|
||
+ "𝕒": true,
|
||
+ "≈": true,
|
||
+ "⩰": true,
|
||
+ "⩯": true,
|
||
+ "≊": true,
|
||
+ "≋": true,
|
||
+ "'": true,
|
||
+ "≈": true,
|
||
+ "≊": true,
|
||
+ "å": true,
|
||
+ "å": true,
|
||
+ "𝒶": true,
|
||
+ "*": true,
|
||
+ "≈": true,
|
||
+ "≍": true,
|
||
+ "ã": true,
|
||
+ "ã": true,
|
||
+ "ä": true,
|
||
+ "ä": true,
|
||
+ "∳": true,
|
||
+ "⨑": true,
|
||
+ "⫭": true,
|
||
+ "≌": true,
|
||
+ "϶": true,
|
||
+ "‵": true,
|
||
+ "∽": true,
|
||
+ "⋍": true,
|
||
+ "⊽": true,
|
||
+ "⌅": true,
|
||
+ "⌅": true,
|
||
+ "⎵": true,
|
||
+ "⎶": true,
|
||
+ "≌": true,
|
||
+ "б": true,
|
||
+ "„": true,
|
||
+ "∵": true,
|
||
+ "∵": true,
|
||
+ "⦰": true,
|
||
+ "϶": true,
|
||
+ "ℬ": true,
|
||
+ "β": true,
|
||
+ "ℶ": true,
|
||
+ "≬": true,
|
||
+ "𝔟": true,
|
||
+ "⋂": true,
|
||
+ "◯": true,
|
||
+ "⋃": true,
|
||
+ "⨀": true,
|
||
+ "⨁": true,
|
||
+ "⨂": true,
|
||
+ "⨆": true,
|
||
+ "★": true,
|
||
+ "▽": true,
|
||
+ "△": true,
|
||
+ "⨄": true,
|
||
+ "⋁": true,
|
||
+ "⋀": true,
|
||
+ "⤍": true,
|
||
+ "⧫": true,
|
||
+ "▪": true,
|
||
+ "▴": true,
|
||
+ "▾": true,
|
||
+ "◂": true,
|
||
+ "▸": true,
|
||
+ "␣": true,
|
||
+ "▒": true,
|
||
+ "░": true,
|
||
+ "▓": true,
|
||
+ "█": true,
|
||
+ "=⃥": true,
|
||
+ "≡⃥": true,
|
||
+ "⌐": true,
|
||
+ "𝕓": true,
|
||
+ "⊥": true,
|
||
+ "⊥": true,
|
||
+ "⋈": true,
|
||
+ "╗": true,
|
||
+ "╔": true,
|
||
+ "╖": true,
|
||
+ "╓": true,
|
||
+ "═": true,
|
||
+ "╦": true,
|
||
+ "╩": true,
|
||
+ "╤": true,
|
||
+ "╧": true,
|
||
+ "╝": true,
|
||
+ "╚": true,
|
||
+ "╜": true,
|
||
+ "╙": true,
|
||
+ "║": true,
|
||
+ "╬": true,
|
||
+ "╣": true,
|
||
+ "╠": true,
|
||
+ "╫": true,
|
||
+ "╢": true,
|
||
+ "╟": true,
|
||
+ "⧉": true,
|
||
+ "╕": true,
|
||
+ "╒": true,
|
||
+ "┐": true,
|
||
+ "┌": true,
|
||
+ "─": true,
|
||
+ "╥": true,
|
||
+ "╨": true,
|
||
+ "┬": true,
|
||
+ "┴": true,
|
||
+ "⊟": true,
|
||
+ "⊞": true,
|
||
+ "⊠": true,
|
||
+ "╛": true,
|
||
+ "╘": true,
|
||
+ "┘": true,
|
||
+ "└": true,
|
||
+ "│": true,
|
||
+ "╪": true,
|
||
+ "╡": true,
|
||
+ "╞": true,
|
||
+ "┼": true,
|
||
+ "┤": true,
|
||
+ "├": true,
|
||
+ "‵": true,
|
||
+ "˘": true,
|
||
+ "¦": true,
|
||
+ "¦": true,
|
||
+ "𝒷": true,
|
||
+ "⁏": true,
|
||
+ "∽": true,
|
||
+ "⋍": true,
|
||
+ "\": true,
|
||
+ "⧅": true,
|
||
+ "⟈": true,
|
||
+ "•": true,
|
||
+ "•": true,
|
||
+ "≎": true,
|
||
+ "⪮": true,
|
||
+ "≏": true,
|
||
+ "≏": true,
|
||
+ "ć": true,
|
||
+ "∩": true,
|
||
+ "⩄": true,
|
||
+ "⩉": true,
|
||
+ "⩋": true,
|
||
+ "⩇": true,
|
||
+ "⩀": true,
|
||
+ "∩︀": true,
|
||
+ "⁁": true,
|
||
+ "ˇ": true,
|
||
+ "⩍": true,
|
||
+ "č": true,
|
||
+ "ç": true,
|
||
+ "ç": true,
|
||
+ "ĉ": true,
|
||
+ "⩌": true,
|
||
+ "⩐": true,
|
||
+ "ċ": true,
|
||
+ "¸": true,
|
||
+ "¸": true,
|
||
+ "⦲": true,
|
||
+ "¢": true,
|
||
+ "¢": true,
|
||
+ "·": true,
|
||
+ "𝔠": true,
|
||
+ "ч": true,
|
||
+ "✓": true,
|
||
+ "✓": true,
|
||
+ "χ": true,
|
||
+ "○": true,
|
||
+ "⧃": true,
|
||
+ "ˆ": true,
|
||
+ "≗": true,
|
||
+ "↺": true,
|
||
+ "↻": true,
|
||
+ "®": true,
|
||
+ "Ⓢ": true,
|
||
+ "⊛": true,
|
||
+ "⊚": true,
|
||
+ "⊝": true,
|
||
+ "≗": true,
|
||
+ "⨐": true,
|
||
+ "⫯": true,
|
||
+ "⧂": true,
|
||
+ "♣": true,
|
||
+ "♣": true,
|
||
+ ":": true,
|
||
+ "≔": true,
|
||
+ "≔": true,
|
||
+ ",": true,
|
||
+ "@": true,
|
||
+ "∁": true,
|
||
+ "∘": true,
|
||
+ "∁": true,
|
||
+ "ℂ": true,
|
||
+ "≅": true,
|
||
+ "⩭": true,
|
||
+ "∮": true,
|
||
+ "𝕔": true,
|
||
+ "∐": true,
|
||
+ "©": true,
|
||
+ "©": true,
|
||
+ "℗": true,
|
||
+ "↵": true,
|
||
+ "✗": true,
|
||
+ "𝒸": true,
|
||
+ "⫏": true,
|
||
+ "⫑": true,
|
||
+ "⫐": true,
|
||
+ "⫒": true,
|
||
+ "⋯": true,
|
||
+ "⤸": true,
|
||
+ "⤵": true,
|
||
+ "⋞": true,
|
||
+ "⋟": true,
|
||
+ "↶": true,
|
||
+ "⤽": true,
|
||
+ "∪": true,
|
||
+ "⩈": true,
|
||
+ "⩆": true,
|
||
+ "⩊": true,
|
||
+ "⊍": true,
|
||
+ "⩅": true,
|
||
+ "∪︀": true,
|
||
+ "↷": true,
|
||
+ "⤼": true,
|
||
+ "⋞": true,
|
||
+ "⋟": true,
|
||
+ "⋎": true,
|
||
+ "⋏": true,
|
||
+ "¤": true,
|
||
+ "¤": true,
|
||
+ "↶": true,
|
||
+ "↷": true,
|
||
+ "⋎": true,
|
||
+ "⋏": true,
|
||
+ "∲": true,
|
||
+ "∱": true,
|
||
+ "⌭": true,
|
||
+ "⇓": true,
|
||
+ "⥥": true,
|
||
+ "†": true,
|
||
+ "ℸ": true,
|
||
+ "↓": true,
|
||
+ "‐": true,
|
||
+ "⊣": true,
|
||
+ "⤏": true,
|
||
+ "˝": true,
|
||
+ "ď": true,
|
||
+ "д": true,
|
||
+ "ⅆ": true,
|
||
+ "‡": true,
|
||
+ "⇊": true,
|
||
+ "⩷": true,
|
||
+ "°": true,
|
||
+ "°": true,
|
||
+ "δ": true,
|
||
+ "⦱": true,
|
||
+ "⥿": true,
|
||
+ "𝔡": true,
|
||
+ "⇃": true,
|
||
+ "⇂": true,
|
||
+ "⋄": true,
|
||
+ "⋄": true,
|
||
+ "♦": true,
|
||
+ "♦": true,
|
||
+ "¨": true,
|
||
+ "ϝ": true,
|
||
+ "⋲": true,
|
||
+ "÷": true,
|
||
+ "÷": true,
|
||
+ "÷": true,
|
||
+ "⋇": true,
|
||
+ "⋇": true,
|
||
+ "ђ": true,
|
||
+ "⌞": true,
|
||
+ "⌍": true,
|
||
+ "$": true,
|
||
+ "𝕕": true,
|
||
+ "˙": true,
|
||
+ "≐": true,
|
||
+ "≑": true,
|
||
+ "∸": true,
|
||
+ "∔": true,
|
||
+ "⊡": true,
|
||
+ "⌆": true,
|
||
+ "↓": true,
|
||
+ "⇊": true,
|
||
+ "⇃": true,
|
||
+ "⇂": true,
|
||
+ "⤐": true,
|
||
+ "⌟": true,
|
||
+ "⌌": true,
|
||
+ "𝒹": true,
|
||
+ "ѕ": true,
|
||
+ "⧶": true,
|
||
+ "đ": true,
|
||
+ "⋱": true,
|
||
+ "▿": true,
|
||
+ "▾": true,
|
||
+ "⇵": true,
|
||
+ "⥯": true,
|
||
+ "⦦": true,
|
||
+ "џ": true,
|
||
+ "⟿": true,
|
||
+ "⩷": true,
|
||
+ "≑": true,
|
||
+ "é": true,
|
||
+ "é": true,
|
||
+ "⩮": true,
|
||
+ "ě": true,
|
||
+ "≖": true,
|
||
+ "ê": true,
|
||
+ "ê": true,
|
||
+ "≕": true,
|
||
+ "э": true,
|
||
+ "ė": true,
|
||
+ "ⅇ": true,
|
||
+ "≒": true,
|
||
+ "𝔢": true,
|
||
+ "⪚": true,
|
||
+ "è": true,
|
||
+ "è": true,
|
||
+ "⪖": true,
|
||
+ "⪘": true,
|
||
+ "⪙": true,
|
||
+ "⏧": true,
|
||
+ "ℓ": true,
|
||
+ "⪕": true,
|
||
+ "⪗": true,
|
||
+ "ē": true,
|
||
+ "∅": true,
|
||
+ "∅": true,
|
||
+ "∅": true,
|
||
+ " ": true,
|
||
+ " ": true,
|
||
+ " ": true,
|
||
+ "ŋ": true,
|
||
+ " ": true,
|
||
+ "ę": true,
|
||
+ "𝕖": true,
|
||
+ "⋕": true,
|
||
+ "⧣": true,
|
||
+ "⩱": true,
|
||
+ "ε": true,
|
||
+ "ε": true,
|
||
+ "ϵ": true,
|
||
+ "≖": true,
|
||
+ "≕": true,
|
||
+ "≂": true,
|
||
+ "⪖": true,
|
||
+ "⪕": true,
|
||
+ "=": true,
|
||
+ "≟": true,
|
||
+ "≡": true,
|
||
+ "⩸": true,
|
||
+ "⧥": true,
|
||
+ "≓": true,
|
||
+ "⥱": true,
|
||
+ "ℯ": true,
|
||
+ "≐": true,
|
||
+ "≂": true,
|
||
+ "η": true,
|
||
+ "ð": true,
|
||
+ "ð": true,
|
||
+ "ë": true,
|
||
+ "ë": true,
|
||
+ "€": true,
|
||
+ "!": true,
|
||
+ "∃": true,
|
||
+ "ℰ": true,
|
||
+ "ⅇ": true,
|
||
+ "≒": true,
|
||
+ "ф": true,
|
||
+ "♀": true,
|
||
+ "ffi": true,
|
||
+ "ff": true,
|
||
+ "ffl": true,
|
||
+ "𝔣": true,
|
||
+ "fi": true,
|
||
+ "fj": true,
|
||
+ "♭": true,
|
||
+ "fl": true,
|
||
+ "▱": true,
|
||
+ "ƒ": true,
|
||
+ "𝕗": true,
|
||
+ "∀": true,
|
||
+ "⋔": true,
|
||
+ "⫙": true,
|
||
+ "⨍": true,
|
||
+ "½": true,
|
||
+ "½": true,
|
||
+ "⅓": true,
|
||
+ "¼": true,
|
||
+ "¼": true,
|
||
+ "⅕": true,
|
||
+ "⅙": true,
|
||
+ "⅛": true,
|
||
+ "⅔": true,
|
||
+ "⅖": true,
|
||
+ "¾": true,
|
||
+ "¾": true,
|
||
+ "⅗": true,
|
||
+ "⅜": true,
|
||
+ "⅘": true,
|
||
+ "⅚": true,
|
||
+ "⅝": true,
|
||
+ "⅞": true,
|
||
+ "⁄": true,
|
||
+ "⌢": true,
|
||
+ "𝒻": true,
|
||
+ "≧": true,
|
||
+ "⪌": true,
|
||
+ "ǵ": true,
|
||
+ "γ": true,
|
||
+ "ϝ": true,
|
||
+ "⪆": true,
|
||
+ "ğ": true,
|
||
+ "ĝ": true,
|
||
+ "г": true,
|
||
+ "ġ": true,
|
||
+ "≥": true,
|
||
+ "⋛": true,
|
||
+ "≥": true,
|
||
+ "≧": true,
|
||
+ "⩾": true,
|
||
+ "⩾": true,
|
||
+ "⪩": true,
|
||
+ "⪀": true,
|
||
+ "⪂": true,
|
||
+ "⪄": true,
|
||
+ "⋛︀": true,
|
||
+ "⪔": true,
|
||
+ "𝔤": true,
|
||
+ "≫": true,
|
||
+ "⋙": true,
|
||
+ "ℷ": true,
|
||
+ "ѓ": true,
|
||
+ "≷": true,
|
||
+ "⪒": true,
|
||
+ "⪥": true,
|
||
+ "⪤": true,
|
||
+ "≩": true,
|
||
+ "⪊": true,
|
||
+ "⪊": true,
|
||
+ "⪈": true,
|
||
+ "⪈": true,
|
||
+ "≩": true,
|
||
+ "⋧": true,
|
||
+ "𝕘": true,
|
||
+ "`": true,
|
||
+ "ℊ": true,
|
||
+ "≳": true,
|
||
+ "⪎": true,
|
||
+ "⪐": true,
|
||
+ ">": true,
|
||
+ ">": true,
|
||
+ "⪧": true,
|
||
+ "⩺": true,
|
||
+ "⋗": true,
|
||
+ "⦕": true,
|
||
+ "⩼": true,
|
||
+ "⪆": true,
|
||
+ "⥸": true,
|
||
+ "⋗": true,
|
||
+ "⋛": true,
|
||
+ "⪌": true,
|
||
+ "≷": true,
|
||
+ "≳": true,
|
||
+ "≩︀": true,
|
||
+ "≩︀": true,
|
||
+ "⇔": true,
|
||
+ " ": true,
|
||
+ "½": true,
|
||
+ "ℋ": true,
|
||
+ "ъ": true,
|
||
+ "↔": true,
|
||
+ "⥈": true,
|
||
+ "↭": true,
|
||
+ "ℏ": true,
|
||
+ "ĥ": true,
|
||
+ "♥": true,
|
||
+ "♥": true,
|
||
+ "…": true,
|
||
+ "⊹": true,
|
||
+ "𝔥": true,
|
||
+ "⤥": true,
|
||
+ "⤦": true,
|
||
+ "⇿": true,
|
||
+ "∻": true,
|
||
+ "↩": true,
|
||
+ "↪": true,
|
||
+ "𝕙": true,
|
||
+ "―": true,
|
||
+ "𝒽": true,
|
||
+ "ℏ": true,
|
||
+ "ħ": true,
|
||
+ "⁃": true,
|
||
+ "‐": true,
|
||
+ "í": true,
|
||
+ "í": true,
|
||
+ "⁣": true,
|
||
+ "î": true,
|
||
+ "î": true,
|
||
+ "и": true,
|
||
+ "е": true,
|
||
+ "¡": true,
|
||
+ "¡": true,
|
||
+ "⇔": true,
|
||
+ "𝔦": true,
|
||
+ "ì": true,
|
||
+ "ì": true,
|
||
+ "ⅈ": true,
|
||
+ "⨌": true,
|
||
+ "∭": true,
|
||
+ "⧜": true,
|
||
+ "℩": true,
|
||
+ "ij": true,
|
||
+ "ī": true,
|
||
+ "ℑ": true,
|
||
+ "ℐ": true,
|
||
+ "ℑ": true,
|
||
+ "ı": true,
|
||
+ "⊷": true,
|
||
+ "Ƶ": true,
|
||
+ "∈": true,
|
||
+ "℅": true,
|
||
+ "∞": true,
|
||
+ "⧝": true,
|
||
+ "ı": true,
|
||
+ "∫": true,
|
||
+ "⊺": true,
|
||
+ "ℤ": true,
|
||
+ "⊺": true,
|
||
+ "⨗": true,
|
||
+ "⨼": true,
|
||
+ "ё": true,
|
||
+ "į": true,
|
||
+ "𝕚": true,
|
||
+ "ι": true,
|
||
+ "⨼": true,
|
||
+ "¿": true,
|
||
+ "¿": true,
|
||
+ "𝒾": true,
|
||
+ "∈": true,
|
||
+ "⋹": true,
|
||
+ "⋵": true,
|
||
+ "⋴": true,
|
||
+ "⋳": true,
|
||
+ "∈": true,
|
||
+ "⁢": true,
|
||
+ "ĩ": true,
|
||
+ "і": true,
|
||
+ "ï": true,
|
||
+ "ï": true,
|
||
+ "ĵ": true,
|
||
+ "й": true,
|
||
+ "𝔧": true,
|
||
+ "ȷ": true,
|
||
+ "𝕛": true,
|
||
+ "𝒿": true,
|
||
+ "ј": true,
|
||
+ "є": true,
|
||
+ "κ": true,
|
||
+ "ϰ": true,
|
||
+ "ķ": true,
|
||
+ "к": true,
|
||
+ "𝔨": true,
|
||
+ "ĸ": true,
|
||
+ "х": true,
|
||
+ "ќ": true,
|
||
+ "𝕜": true,
|
||
+ "𝓀": true,
|
||
+ "⇚": true,
|
||
+ "⇐": true,
|
||
+ "⤛": true,
|
||
+ "⤎": true,
|
||
+ "≦": true,
|
||
+ "⪋": true,
|
||
+ "⥢": true,
|
||
+ "ĺ": true,
|
||
+ "⦴": true,
|
||
+ "ℒ": true,
|
||
+ "λ": true,
|
||
+ "⟨": true,
|
||
+ "⦑": true,
|
||
+ "⟨": true,
|
||
+ "⪅": true,
|
||
+ "«": true,
|
||
+ "«": true,
|
||
+ "←": true,
|
||
+ "⇤": true,
|
||
+ "⤟": true,
|
||
+ "⤝": true,
|
||
+ "↩": true,
|
||
+ "↫": true,
|
||
+ "⤹": true,
|
||
+ "⥳": true,
|
||
+ "↢": true,
|
||
+ "⪫": true,
|
||
+ "⤙": true,
|
||
+ "⪭": true,
|
||
+ "⪭︀": true,
|
||
+ "⤌": true,
|
||
+ "❲": true,
|
||
+ "{": true,
|
||
+ "[": true,
|
||
+ "⦋": true,
|
||
+ "⦏": true,
|
||
+ "⦍": true,
|
||
+ "ľ": true,
|
||
+ "ļ": true,
|
||
+ "⌈": true,
|
||
+ "{": true,
|
||
+ "л": true,
|
||
+ "⤶": true,
|
||
+ "“": true,
|
||
+ "„": true,
|
||
+ "⥧": true,
|
||
+ "⥋": true,
|
||
+ "↲": true,
|
||
+ "≤": true,
|
||
+ "←": true,
|
||
+ "↢": true,
|
||
+ "↽": true,
|
||
+ "↼": true,
|
||
+ "⇇": true,
|
||
+ "↔": true,
|
||
+ "⇆": true,
|
||
+ "⇋": true,
|
||
+ "↭": true,
|
||
+ "⋋": true,
|
||
+ "⋚": true,
|
||
+ "≤": true,
|
||
+ "≦": true,
|
||
+ "⩽": true,
|
||
+ "⩽": true,
|
||
+ "⪨": true,
|
||
+ "⩿": true,
|
||
+ "⪁": true,
|
||
+ "⪃": true,
|
||
+ "⋚︀": true,
|
||
+ "⪓": true,
|
||
+ "⪅": true,
|
||
+ "⋖": true,
|
||
+ "⋚": true,
|
||
+ "⪋": true,
|
||
+ "≶": true,
|
||
+ "≲": true,
|
||
+ "⥼": true,
|
||
+ "⌊": true,
|
||
+ "𝔩": true,
|
||
+ "≶": true,
|
||
+ "⪑": true,
|
||
+ "↽": true,
|
||
+ "↼": true,
|
||
+ "⥪": true,
|
||
+ "▄": true,
|
||
+ "љ": true,
|
||
+ "≪": true,
|
||
+ "⇇": true,
|
||
+ "⌞": true,
|
||
+ "⥫": true,
|
||
+ "◺": true,
|
||
+ "ŀ": true,
|
||
+ "⎰": true,
|
||
+ "⎰": true,
|
||
+ "≨": true,
|
||
+ "⪉": true,
|
||
+ "⪉": true,
|
||
+ "⪇": true,
|
||
+ "⪇": true,
|
||
+ "≨": true,
|
||
+ "⋦": true,
|
||
+ "⟬": true,
|
||
+ "⇽": true,
|
||
+ "⟦": true,
|
||
+ "⟵": true,
|
||
+ "⟷": true,
|
||
+ "⟼": true,
|
||
+ "⟶": true,
|
||
+ "↫": true,
|
||
+ "↬": true,
|
||
+ "⦅": true,
|
||
+ "𝕝": true,
|
||
+ "⨭": true,
|
||
+ "⨴": true,
|
||
+ "∗": true,
|
||
+ "_": true,
|
||
+ "◊": true,
|
||
+ "◊": true,
|
||
+ "⧫": true,
|
||
+ "(": true,
|
||
+ "⦓": true,
|
||
+ "⇆": true,
|
||
+ "⌟": true,
|
||
+ "⇋": true,
|
||
+ "⥭": true,
|
||
+ "‎": true,
|
||
+ "⊿": true,
|
||
+ "‹": true,
|
||
+ "𝓁": true,
|
||
+ "↰": true,
|
||
+ "≲": true,
|
||
+ "⪍": true,
|
||
+ "⪏": true,
|
||
+ "[": true,
|
||
+ "‘": true,
|
||
+ "‚": true,
|
||
+ "ł": true,
|
||
+ "<": true,
|
||
+ "<": true,
|
||
+ "⪦": true,
|
||
+ "⩹": true,
|
||
+ "⋖": true,
|
||
+ "⋋": true,
|
||
+ "⋉": true,
|
||
+ "⥶": true,
|
||
+ "⩻": true,
|
||
+ "⦖": true,
|
||
+ "◃": true,
|
||
+ "⊴": true,
|
||
+ "◂": true,
|
||
+ "⥊": true,
|
||
+ "⥦": true,
|
||
+ "≨︀": true,
|
||
+ "≨︀": true,
|
||
+ "∺": true,
|
||
+ "¯": true,
|
||
+ "¯": true,
|
||
+ "♂": true,
|
||
+ "✠": true,
|
||
+ "✠": true,
|
||
+ "↦": true,
|
||
+ "↦": true,
|
||
+ "↧": true,
|
||
+ "↤": true,
|
||
+ "↥": true,
|
||
+ "▮": true,
|
||
+ "⨩": true,
|
||
+ "м": true,
|
||
+ "—": true,
|
||
+ "∡": true,
|
||
+ "𝔪": true,
|
||
+ "℧": true,
|
||
+ "µ": true,
|
||
+ "µ": true,
|
||
+ "∣": true,
|
||
+ "*": true,
|
||
+ "⫰": true,
|
||
+ "·": true,
|
||
+ "·": true,
|
||
+ "−": true,
|
||
+ "⊟": true,
|
||
+ "∸": true,
|
||
+ "⨪": true,
|
||
+ "⫛": true,
|
||
+ "…": true,
|
||
+ "∓": true,
|
||
+ "⊧": true,
|
||
+ "𝕞": true,
|
||
+ "∓": true,
|
||
+ "𝓂": true,
|
||
+ "∾": true,
|
||
+ "μ": true,
|
||
+ "⊸": true,
|
||
+ "⊸": true,
|
||
+ "⋙̸": true,
|
||
+ "≫⃒": true,
|
||
+ "≫̸": true,
|
||
+ "⇍": true,
|
||
+ "⇎": true,
|
||
+ "⋘̸": true,
|
||
+ "≪⃒": true,
|
||
+ "≪̸": true,
|
||
+ "⇏": true,
|
||
+ "⊯": true,
|
||
+ "⊮": true,
|
||
+ "∇": true,
|
||
+ "ń": true,
|
||
+ "∠⃒": true,
|
||
+ "≉": true,
|
||
+ "⩰̸": true,
|
||
+ "≋̸": true,
|
||
+ "ʼn": true,
|
||
+ "≉": true,
|
||
+ "♮": true,
|
||
+ "♮": true,
|
||
+ "ℕ": true,
|
||
+ " ": true,
|
||
+ " ": true,
|
||
+ "≎̸": true,
|
||
+ "≏̸": true,
|
||
+ "⩃": true,
|
||
+ "ň": true,
|
||
+ "ņ": true,
|
||
+ "≇": true,
|
||
+ "⩭̸": true,
|
||
+ "⩂": true,
|
||
+ "н": true,
|
||
+ "–": true,
|
||
+ "≠": true,
|
||
+ "⇗": true,
|
||
+ "⤤": true,
|
||
+ "↗": true,
|
||
+ "↗": true,
|
||
+ "≐̸": true,
|
||
+ "≢": true,
|
||
+ "⤨": true,
|
||
+ "≂̸": true,
|
||
+ "∄": true,
|
||
+ "∄": true,
|
||
+ "𝔫": true,
|
||
+ "≧̸": true,
|
||
+ "≱": true,
|
||
+ "≱": true,
|
||
+ "≧̸": true,
|
||
+ "⩾̸": true,
|
||
+ "⩾̸": true,
|
||
+ "≵": true,
|
||
+ "≯": true,
|
||
+ "≯": true,
|
||
+ "⇎": true,
|
||
+ "↮": true,
|
||
+ "⫲": true,
|
||
+ "∋": true,
|
||
+ "⋼": true,
|
||
+ "⋺": true,
|
||
+ "∋": true,
|
||
+ "њ": true,
|
||
+ "⇍": true,
|
||
+ "≦̸": true,
|
||
+ "↚": true,
|
||
+ "‥": true,
|
||
+ "≰": true,
|
||
+ "↚": true,
|
||
+ "↮": true,
|
||
+ "≰": true,
|
||
+ "≦̸": true,
|
||
+ "⩽̸": true,
|
||
+ "⩽̸": true,
|
||
+ "≮": true,
|
||
+ "≴": true,
|
||
+ "≮": true,
|
||
+ "⋪": true,
|
||
+ "⋬": true,
|
||
+ "∤": true,
|
||
+ "𝕟": true,
|
||
+ "¬": true,
|
||
+ "¬": true,
|
||
+ "∉": true,
|
||
+ "⋹̸": true,
|
||
+ "⋵̸": true,
|
||
+ "∉": true,
|
||
+ "⋷": true,
|
||
+ "⋶": true,
|
||
+ "∌": true,
|
||
+ "∌": true,
|
||
+ "⋾": true,
|
||
+ "⋽": true,
|
||
+ "∦": true,
|
||
+ "∦": true,
|
||
+ "⫽⃥": true,
|
||
+ "∂̸": true,
|
||
+ "⨔": true,
|
||
+ "⊀": true,
|
||
+ "⋠": true,
|
||
+ "⪯̸": true,
|
||
+ "⊀": true,
|
||
+ "⪯̸": true,
|
||
+ "⇏": true,
|
||
+ "↛": true,
|
||
+ "⤳̸": true,
|
||
+ "↝̸": true,
|
||
+ "↛": true,
|
||
+ "⋫": true,
|
||
+ "⋭": true,
|
||
+ "⊁": true,
|
||
+ "⋡": true,
|
||
+ "⪰̸": true,
|
||
+ "𝓃": true,
|
||
+ "∤": true,
|
||
+ "∦": true,
|
||
+ "≁": true,
|
||
+ "≄": true,
|
||
+ "≄": true,
|
||
+ "∤": true,
|
||
+ "∦": true,
|
||
+ "⋢": true,
|
||
+ "⋣": true,
|
||
+ "⊄": true,
|
||
+ "⫅̸": true,
|
||
+ "⊈": true,
|
||
+ "⊂⃒": true,
|
||
+ "⊈": true,
|
||
+ "⫅̸": true,
|
||
+ "⊁": true,
|
||
+ "⪰̸": true,
|
||
+ "⊅": true,
|
||
+ "⫆̸": true,
|
||
+ "⊉": true,
|
||
+ "⊃⃒": true,
|
||
+ "⊉": true,
|
||
+ "⫆̸": true,
|
||
+ "≹": true,
|
||
+ "ñ": true,
|
||
+ "ñ": true,
|
||
+ "≸": true,
|
||
+ "⋪": true,
|
||
+ "⋬": true,
|
||
+ "⋫": true,
|
||
+ "⋭": true,
|
||
+ "ν": true,
|
||
+ "#": true,
|
||
+ "№": true,
|
||
+ " ": true,
|
||
+ "⊭": true,
|
||
+ "⤄": true,
|
||
+ "≍⃒": true,
|
||
+ "⊬": true,
|
||
+ "≥⃒": true,
|
||
+ ">⃒": true,
|
||
+ "⧞": true,
|
||
+ "⤂": true,
|
||
+ "≤⃒": true,
|
||
+ "<⃒": true,
|
||
+ "⊴⃒": true,
|
||
+ "⤃": true,
|
||
+ "⊵⃒": true,
|
||
+ "∼⃒": true,
|
||
+ "⇖": true,
|
||
+ "⤣": true,
|
||
+ "↖": true,
|
||
+ "↖": true,
|
||
+ "⤧": true,
|
||
+ "Ⓢ": true,
|
||
+ "ó": true,
|
||
+ "ó": true,
|
||
+ "⊛": true,
|
||
+ "⊚": true,
|
||
+ "ô": true,
|
||
+ "ô": true,
|
||
+ "о": true,
|
||
+ "⊝": true,
|
||
+ "ő": true,
|
||
+ "⨸": true,
|
||
+ "⊙": true,
|
||
+ "⦼": true,
|
||
+ "œ": true,
|
||
+ "⦿": true,
|
||
+ "𝔬": true,
|
||
+ "˛": true,
|
||
+ "ò": true,
|
||
+ "ò": true,
|
||
+ "⧁": true,
|
||
+ "⦵": true,
|
||
+ "Ω": true,
|
||
+ "∮": true,
|
||
+ "↺": true,
|
||
+ "⦾": true,
|
||
+ "⦻": true,
|
||
+ "‾": true,
|
||
+ "⧀": true,
|
||
+ "ō": true,
|
||
+ "ω": true,
|
||
+ "ο": true,
|
||
+ "⦶": true,
|
||
+ "⊖": true,
|
||
+ "𝕠": true,
|
||
+ "⦷": true,
|
||
+ "⦹": true,
|
||
+ "⊕": true,
|
||
+ "∨": true,
|
||
+ "↻": true,
|
||
+ "⩝": true,
|
||
+ "ℴ": true,
|
||
+ "ℴ": true,
|
||
+ "ª": true,
|
||
+ "ª": true,
|
||
+ "º": true,
|
||
+ "º": true,
|
||
+ "⊶": true,
|
||
+ "⩖": true,
|
||
+ "⩗": true,
|
||
+ "⩛": true,
|
||
+ "ℴ": true,
|
||
+ "ø": true,
|
||
+ "ø": true,
|
||
+ "⊘": true,
|
||
+ "õ": true,
|
||
+ "õ": true,
|
||
+ "⊗": true,
|
||
+ "⨶": true,
|
||
+ "ö": true,
|
||
+ "ö": true,
|
||
+ "⌽": true,
|
||
+ "∥": true,
|
||
+ "¶": true,
|
||
+ "¶": true,
|
||
+ "∥": true,
|
||
+ "⫳": true,
|
||
+ "⫽": true,
|
||
+ "∂": true,
|
||
+ "п": true,
|
||
+ "%": true,
|
||
+ ".": true,
|
||
+ "‰": true,
|
||
+ "⊥": true,
|
||
+ "‱": true,
|
||
+ "𝔭": true,
|
||
+ "φ": true,
|
||
+ "ϕ": true,
|
||
+ "ℳ": true,
|
||
+ "☎": true,
|
||
+ "π": true,
|
||
+ "⋔": true,
|
||
+ "ϖ": true,
|
||
+ "ℏ": true,
|
||
+ "ℎ": true,
|
||
+ "ℏ": true,
|
||
+ "+": true,
|
||
+ "⨣": true,
|
||
+ "⊞": true,
|
||
+ "⨢": true,
|
||
+ "∔": true,
|
||
+ "⨥": true,
|
||
+ "⩲": true,
|
||
+ "±": true,
|
||
+ "±": true,
|
||
+ "⨦": true,
|
||
+ "⨧": true,
|
||
+ "±": true,
|
||
+ "⨕": true,
|
||
+ "𝕡": true,
|
||
+ "£": true,
|
||
+ "£": true,
|
||
+ "≺": true,
|
||
+ "⪳": true,
|
||
+ "⪷": true,
|
||
+ "≼": true,
|
||
+ "⪯": true,
|
||
+ "≺": true,
|
||
+ "⪷": true,
|
||
+ "≼": true,
|
||
+ "⪯": true,
|
||
+ "⪹": true,
|
||
+ "⪵": true,
|
||
+ "⋨": true,
|
||
+ "≾": true,
|
||
+ "′": true,
|
||
+ "ℙ": true,
|
||
+ "⪵": true,
|
||
+ "⪹": true,
|
||
+ "⋨": true,
|
||
+ "∏": true,
|
||
+ "⌮": true,
|
||
+ "⌒": true,
|
||
+ "⌓": true,
|
||
+ "∝": true,
|
||
+ "∝": true,
|
||
+ "≾": true,
|
||
+ "⊰": true,
|
||
+ "𝓅": true,
|
||
+ "ψ": true,
|
||
+ " ": true,
|
||
+ "𝔮": true,
|
||
+ "⨌": true,
|
||
+ "𝕢": true,
|
||
+ "⁗": true,
|
||
+ "𝓆": true,
|
||
+ "ℍ": true,
|
||
+ "⨖": true,
|
||
+ "?": true,
|
||
+ "≟": true,
|
||
+ """: true,
|
||
+ """: true,
|
||
+ "⇛": true,
|
||
+ "⇒": true,
|
||
+ "⤜": true,
|
||
+ "⤏": true,
|
||
+ "⥤": true,
|
||
+ "∽̱": true,
|
||
+ "ŕ": true,
|
||
+ "√": true,
|
||
+ "⦳": true,
|
||
+ "⟩": true,
|
||
+ "⦒": true,
|
||
+ "⦥": true,
|
||
+ "⟩": true,
|
||
+ "»": true,
|
||
+ "»": true,
|
||
+ "→": true,
|
||
+ "⥵": true,
|
||
+ "⇥": true,
|
||
+ "⤠": true,
|
||
+ "⤳": true,
|
||
+ "⤞": true,
|
||
+ "↪": true,
|
||
+ "↬": true,
|
||
+ "⥅": true,
|
||
+ "⥴": true,
|
||
+ "↣": true,
|
||
+ "↝": true,
|
||
+ "⤚": true,
|
||
+ "∶": true,
|
||
+ "ℚ": true,
|
||
+ "⤍": true,
|
||
+ "❳": true,
|
||
+ "}": true,
|
||
+ "]": true,
|
||
+ "⦌": true,
|
||
+ "⦎": true,
|
||
+ "⦐": true,
|
||
+ "ř": true,
|
||
+ "ŗ": true,
|
||
+ "⌉": true,
|
||
+ "}": true,
|
||
+ "р": true,
|
||
+ "⤷": true,
|
||
+ "⥩": true,
|
||
+ "”": true,
|
||
+ "”": true,
|
||
+ "↳": true,
|
||
+ "ℜ": true,
|
||
+ "ℛ": true,
|
||
+ "ℜ": true,
|
||
+ "ℝ": true,
|
||
+ "▭": true,
|
||
+ "®": true,
|
||
+ "®": true,
|
||
+ "⥽": true,
|
||
+ "⌋": true,
|
||
+ "𝔯": true,
|
||
+ "⇁": true,
|
||
+ "⇀": true,
|
||
+ "⥬": true,
|
||
+ "ρ": true,
|
||
+ "ϱ": true,
|
||
+ "→": true,
|
||
+ "↣": true,
|
||
+ "⇁": true,
|
||
+ "⇀": true,
|
||
+ "⇄": true,
|
||
+ "⇌": true,
|
||
+ "⇉": true,
|
||
+ "↝": true,
|
||
+ "⋌": true,
|
||
+ "˚": true,
|
||
+ "≓": true,
|
||
+ "⇄": true,
|
||
+ "⇌": true,
|
||
+ "‏": true,
|
||
+ "⎱": true,
|
||
+ "⎱": true,
|
||
+ "⫮": true,
|
||
+ "⟭": true,
|
||
+ "⇾": true,
|
||
+ "⟧": true,
|
||
+ "⦆": true,
|
||
+ "𝕣": true,
|
||
+ "⨮": true,
|
||
+ "⨵": true,
|
||
+ ")": true,
|
||
+ "⦔": true,
|
||
+ "⨒": true,
|
||
+ "⇉": true,
|
||
+ "›": true,
|
||
+ "𝓇": true,
|
||
+ "↱": true,
|
||
+ "]": true,
|
||
+ "’": true,
|
||
+ "’": true,
|
||
+ "⋌": true,
|
||
+ "⋊": true,
|
||
+ "▹": true,
|
||
+ "⊵": true,
|
||
+ "▸": true,
|
||
+ "⧎": true,
|
||
+ "⥨": true,
|
||
+ "℞": true,
|
||
+ "ś": true,
|
||
+ "‚": true,
|
||
+ "≻": true,
|
||
+ "⪴": true,
|
||
+ "⪸": true,
|
||
+ "š": true,
|
||
+ "≽": true,
|
||
+ "⪰": true,
|
||
+ "ş": true,
|
||
+ "ŝ": true,
|
||
+ "⪶": true,
|
||
+ "⪺": true,
|
||
+ "⋩": true,
|
||
+ "⨓": true,
|
||
+ "≿": true,
|
||
+ "с": true,
|
||
+ "⋅": true,
|
||
+ "⊡": true,
|
||
+ "⩦": true,
|
||
+ "⇘": true,
|
||
+ "⤥": true,
|
||
+ "↘": true,
|
||
+ "↘": true,
|
||
+ "§": true,
|
||
+ "§": true,
|
||
+ ";": true,
|
||
+ "⤩": true,
|
||
+ "∖": true,
|
||
+ "∖": true,
|
||
+ "✶": true,
|
||
+ "𝔰": true,
|
||
+ "⌢": true,
|
||
+ "♯": true,
|
||
+ "щ": true,
|
||
+ "ш": true,
|
||
+ "∣": true,
|
||
+ "∥": true,
|
||
+ "­": true,
|
||
+ "­": true,
|
||
+ "σ": true,
|
||
+ "ς": true,
|
||
+ "ς": true,
|
||
+ "∼": true,
|
||
+ "⩪": true,
|
||
+ "≃": true,
|
||
+ "≃": true,
|
||
+ "⪞": true,
|
||
+ "⪠": true,
|
||
+ "⪝": true,
|
||
+ "⪟": true,
|
||
+ "≆": true,
|
||
+ "⨤": true,
|
||
+ "⥲": true,
|
||
+ "←": true,
|
||
+ "∖": true,
|
||
+ "⨳": true,
|
||
+ "⧤": true,
|
||
+ "∣": true,
|
||
+ "⌣": true,
|
||
+ "⪪": true,
|
||
+ "⪬": true,
|
||
+ "⪬︀": true,
|
||
+ "ь": true,
|
||
+ "/": true,
|
||
+ "⧄": true,
|
||
+ "⌿": true,
|
||
+ "𝕤": true,
|
||
+ "♠": true,
|
||
+ "♠": true,
|
||
+ "∥": true,
|
||
+ "⊓": true,
|
||
+ "⊓︀": true,
|
||
+ "⊔": true,
|
||
+ "⊔︀": true,
|
||
+ "⊏": true,
|
||
+ "⊑": true,
|
||
+ "⊏": true,
|
||
+ "⊑": true,
|
||
+ "⊐": true,
|
||
+ "⊒": true,
|
||
+ "⊐": true,
|
||
+ "⊒": true,
|
||
+ "□": true,
|
||
+ "□": true,
|
||
+ "▪": true,
|
||
+ "▪": true,
|
||
+ "→": true,
|
||
+ "𝓈": true,
|
||
+ "∖": true,
|
||
+ "⌣": true,
|
||
+ "⋆": true,
|
||
+ "☆": true,
|
||
+ "★": true,
|
||
+ "ϵ": true,
|
||
+ "ϕ": true,
|
||
+ "¯": true,
|
||
+ "⊂": true,
|
||
+ "⫅": true,
|
||
+ "⪽": true,
|
||
+ "⊆": true,
|
||
+ "⫃": true,
|
||
+ "⫁": true,
|
||
+ "⫋": true,
|
||
+ "⊊": true,
|
||
+ "⪿": true,
|
||
+ "⥹": true,
|
||
+ "⊂": true,
|
||
+ "⊆": true,
|
||
+ "⫅": true,
|
||
+ "⊊": true,
|
||
+ "⫋": true,
|
||
+ "⫇": true,
|
||
+ "⫕": true,
|
||
+ "⫓": true,
|
||
+ "≻": true,
|
||
+ "⪸": true,
|
||
+ "≽": true,
|
||
+ "⪰": true,
|
||
+ "⪺": true,
|
||
+ "⪶": true,
|
||
+ "⋩": true,
|
||
+ "≿": true,
|
||
+ "∑": true,
|
||
+ "♪": true,
|
||
+ "¹": true,
|
||
+ "¹": true,
|
||
+ "²": true,
|
||
+ "²": true,
|
||
+ "³": true,
|
||
+ "³": true,
|
||
+ "⊃": true,
|
||
+ "⫆": true,
|
||
+ "⪾": true,
|
||
+ "⫘": true,
|
||
+ "⊇": true,
|
||
+ "⫄": true,
|
||
+ "⟉": true,
|
||
+ "⫗": true,
|
||
+ "⥻": true,
|
||
+ "⫂": true,
|
||
+ "⫌": true,
|
||
+ "⊋": true,
|
||
+ "⫀": true,
|
||
+ "⊃": true,
|
||
+ "⊇": true,
|
||
+ "⫆": true,
|
||
+ "⊋": true,
|
||
+ "⫌": true,
|
||
+ "⫈": true,
|
||
+ "⫔": true,
|
||
+ "⫖": true,
|
||
+ "⇙": true,
|
||
+ "⤦": true,
|
||
+ "↙": true,
|
||
+ "↙": true,
|
||
+ "⤪": true,
|
||
+ "ß": true,
|
||
+ "ß": true,
|
||
+ "⌖": true,
|
||
+ "τ": true,
|
||
+ "⎴": true,
|
||
+ "ť": true,
|
||
+ "ţ": true,
|
||
+ "т": true,
|
||
+ "⃛": true,
|
||
+ "⌕": true,
|
||
+ "𝔱": true,
|
||
+ "∴": true,
|
||
+ "∴": true,
|
||
+ "θ": true,
|
||
+ "ϑ": true,
|
||
+ "ϑ": true,
|
||
+ "≈": true,
|
||
+ "∼": true,
|
||
+ " ": true,
|
||
+ "≈": true,
|
||
+ "∼": true,
|
||
+ "þ": true,
|
||
+ "þ": true,
|
||
+ "˜": true,
|
||
+ "×": true,
|
||
+ "×": true,
|
||
+ "⊠": true,
|
||
+ "⨱": true,
|
||
+ "⨰": true,
|
||
+ "∭": true,
|
||
+ "⤨": true,
|
||
+ "⊤": true,
|
||
+ "⌶": true,
|
||
+ "⫱": true,
|
||
+ "𝕥": true,
|
||
+ "⫚": true,
|
||
+ "⤩": true,
|
||
+ "‴": true,
|
||
+ "™": true,
|
||
+ "▵": true,
|
||
+ "▿": true,
|
||
+ "◃": true,
|
||
+ "⊴": true,
|
||
+ "≜": true,
|
||
+ "▹": true,
|
||
+ "⊵": true,
|
||
+ "◬": true,
|
||
+ "≜": true,
|
||
+ "⨺": true,
|
||
+ "⨹": true,
|
||
+ "⧍": true,
|
||
+ "⨻": true,
|
||
+ "⏢": true,
|
||
+ "𝓉": true,
|
||
+ "ц": true,
|
||
+ "ћ": true,
|
||
+ "ŧ": true,
|
||
+ "≬": true,
|
||
+ "↞": true,
|
||
+ "↠": true,
|
||
+ "⇑": true,
|
||
+ "⥣": true,
|
||
+ "ú": true,
|
||
+ "ú": true,
|
||
+ "↑": true,
|
||
+ "ў": true,
|
||
+ "ŭ": true,
|
||
+ "û": true,
|
||
+ "û": true,
|
||
+ "у": true,
|
||
+ "⇅": true,
|
||
+ "ű": true,
|
||
+ "⥮": true,
|
||
+ "⥾": true,
|
||
+ "𝔲": true,
|
||
+ "ù": true,
|
||
+ "ù": true,
|
||
+ "↿": true,
|
||
+ "↾": true,
|
||
+ "▀": true,
|
||
+ "⌜": true,
|
||
+ "⌜": true,
|
||
+ "⌏": true,
|
||
+ "◸": true,
|
||
+ "ū": true,
|
||
+ "¨": true,
|
||
+ "¨": true,
|
||
+ "ų": true,
|
||
+ "𝕦": true,
|
||
+ "↑": true,
|
||
+ "↕": true,
|
||
+ "↿": true,
|
||
+ "↾": true,
|
||
+ "⊎": true,
|
||
+ "υ": true,
|
||
+ "ϒ": true,
|
||
+ "υ": true,
|
||
+ "⇈": true,
|
||
+ "⌝": true,
|
||
+ "⌝": true,
|
||
+ "⌎": true,
|
||
+ "ů": true,
|
||
+ "◹": true,
|
||
+ "𝓊": true,
|
||
+ "⋰": true,
|
||
+ "ũ": true,
|
||
+ "▵": true,
|
||
+ "▴": true,
|
||
+ "⇈": true,
|
||
+ "ü": true,
|
||
+ "ü": true,
|
||
+ "⦧": true,
|
||
+ "⇕": true,
|
||
+ "⫨": true,
|
||
+ "⫩": true,
|
||
+ "⊨": true,
|
||
+ "⦜": true,
|
||
+ "ϵ": true,
|
||
+ "ϰ": true,
|
||
+ "∅": true,
|
||
+ "ϕ": true,
|
||
+ "ϖ": true,
|
||
+ "∝": true,
|
||
+ "↕": true,
|
||
+ "ϱ": true,
|
||
+ "ς": true,
|
||
+ "⊊︀": true,
|
||
+ "⫋︀": true,
|
||
+ "⊋︀": true,
|
||
+ "⫌︀": true,
|
||
+ "ϑ": true,
|
||
+ "⊲": true,
|
||
+ "⊳": true,
|
||
+ "в": true,
|
||
+ "⊢": true,
|
||
+ "∨": true,
|
||
+ "⊻": true,
|
||
+ "≚": true,
|
||
+ "⋮": true,
|
||
+ "|": true,
|
||
+ "|": true,
|
||
+ "𝔳": true,
|
||
+ "⊲": true,
|
||
+ "⊂⃒": true,
|
||
+ "⊃⃒": true,
|
||
+ "𝕧": true,
|
||
+ "∝": true,
|
||
+ "⊳": true,
|
||
+ "𝓋": true,
|
||
+ "⫋︀": true,
|
||
+ "⊊︀": true,
|
||
+ "⫌︀": true,
|
||
+ "⊋︀": true,
|
||
+ "⦚": true,
|
||
+ "ŵ": true,
|
||
+ "⩟": true,
|
||
+ "∧": true,
|
||
+ "≙": true,
|
||
+ "℘": true,
|
||
+ "𝔴": true,
|
||
+ "𝕨": true,
|
||
+ "℘": true,
|
||
+ "≀": true,
|
||
+ "≀": true,
|
||
+ "𝓌": true,
|
||
+ "⋂": true,
|
||
+ "◯": true,
|
||
+ "⋃": true,
|
||
+ "▽": true,
|
||
+ "𝔵": true,
|
||
+ "⟺": true,
|
||
+ "⟷": true,
|
||
+ "ξ": true,
|
||
+ "⟸": true,
|
||
+ "⟵": true,
|
||
+ "⟼": true,
|
||
+ "⋻": true,
|
||
+ "⨀": true,
|
||
+ "𝕩": true,
|
||
+ "⨁": true,
|
||
+ "⨂": true,
|
||
+ "⟹": true,
|
||
+ "⟶": true,
|
||
+ "𝓍": true,
|
||
+ "⨆": true,
|
||
+ "⨄": true,
|
||
+ "△": true,
|
||
+ "⋁": true,
|
||
+ "⋀": true,
|
||
+ "ý": true,
|
||
+ "ý": true,
|
||
+ "я": true,
|
||
+ "ŷ": true,
|
||
+ "ы": true,
|
||
+ "¥": true,
|
||
+ "¥": true,
|
||
+ "𝔶": true,
|
||
+ "ї": true,
|
||
+ "𝕪": true,
|
||
+ "𝓎": true,
|
||
+ "ю": true,
|
||
+ "ÿ": true,
|
||
+ "ÿ": true,
|
||
+ "ź": true,
|
||
+ "ž": true,
|
||
+ "з": true,
|
||
+ "ż": true,
|
||
+ "ℨ": true,
|
||
+ "ζ": true,
|
||
+ "𝔷": true,
|
||
+ "ж": true,
|
||
+ "⇝": true,
|
||
+ "𝕫": true,
|
||
+ "𝓏": true,
|
||
+ "‍": true,
|
||
+ "‌": true,
|
||
+}
|
||
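The table above is keyed by complete entity references, leading "&" through trailing ";", so entity recognition in the escaper below reduces to a single map lookup. A minimal sketch of such a lookup (hypothetical in-package helper, not part of the vendored file):

	// isKnownEntity reports whether candidate is a recognized HTML
	// entity reference; keys carry the leading '&' and trailing ';'.
	func isKnownEntity(candidate string) bool {
		return entities[candidate] // e.g. entities["&amp;"] == true
	}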
diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go
new file mode 100644
index 000000000000..6ab60102c9bf
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/esc.go
@@ -0,0 +1,70 @@
+package blackfriday
+
+import (
+	"html"
+	"io"
+)
+
+var htmlEscaper = [256][]byte{
+	'&': []byte("&amp;"),
+	'<': []byte("&lt;"),
+	'>': []byte("&gt;"),
+	'"': []byte("&quot;"),
+}
+
+func escapeHTML(w io.Writer, s []byte) {
+	escapeEntities(w, s, false)
+}
+
+func escapeAllHTML(w io.Writer, s []byte) {
+	escapeEntities(w, s, true)
+}
+
+func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) {
+	var start, end int
+	for end < len(s) {
+		escSeq := htmlEscaper[s[end]]
+		if escSeq != nil {
+			isEntity, entityEnd := nodeIsEntity(s, end)
+			if isEntity && !escapeValidEntities {
+				w.Write(s[start : entityEnd+1])
+				start = entityEnd + 1
+			} else {
+				w.Write(s[start:end])
+				w.Write(escSeq)
+				start = end + 1
+			}
+		}
+		end++
+	}
+	if start < len(s) && end <= len(s) {
+		w.Write(s[start:end])
+	}
+}
+
+func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) {
+	isEntity = false
+	endEntityPos = end + 1
+
+	if s[end] == '&' {
+		for endEntityPos < len(s) {
+			if s[endEntityPos] == ';' {
+				if entities[string(s[end:endEntityPos+1])] {
+					isEntity = true
+					break
+				}
+			}
+			if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' {
+				break
+			}
+			endEntityPos++
+		}
+	}
+
+	return isEntity, endEntityPos
+}
+
+func escLink(w io.Writer, text []byte) {
+	unesc := html.UnescapeString(string(text))
+	escapeHTML(w, []byte(unesc))
+}
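The two escape helpers above differ only in how they treat text that already contains a valid entity reference: escapeHTML leaves a recognized reference such as "&amp;" intact, while escapeAllHTML re-escapes its ampersand as well. A minimal usage sketch (hypothetical in-package snippet, assuming the vendored sources above):

	var a, b bytes.Buffer
	escapeHTML(&a, []byte(`5 &amp; <b>x</b>`))    // a: 5 &amp; &lt;b&gt;x&lt;/b&gt;
	escapeAllHTML(&b, []byte(`5 &amp; <b>x</b>`)) // b: 5 &amp;amp; &lt;b&gt;x&lt;/b&gt;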
diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go
new file mode 100644
index 000000000000..cb4f26e30fd5
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/html.go
@@ -0,0 +1,952 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// HTML rendering backend
+//
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"regexp"
+	"strings"
+)
+
+// HTMLFlags control optional behavior of HTML renderer.
+type HTMLFlags int
+
+// HTML renderer configuration options.
+const (
+	HTMLFlagsNone           HTMLFlags = 0
+	SkipHTML                HTMLFlags = 1 << iota // Skip preformatted HTML blocks
+	SkipImages                                    // Skip embedded images
+	SkipLinks                                     // Skip all links
+	Safelink                                      // Only link to trusted protocols
+	NofollowLinks                                 // Only link with rel="nofollow"
+	NoreferrerLinks                               // Only link with rel="noreferrer"
+	NoopenerLinks                                 // Only link with rel="noopener"
+	HrefTargetBlank                               // Add a blank target
+	CompletePage                                  // Generate a complete HTML page
+	UseXHTML                                      // Generate XHTML output instead of HTML
+	FootnoteReturnLinks                           // Generate a link at the end of a footnote to return to the source
+	Smartypants                                   // Enable smart punctuation substitutions
+	SmartypantsFractions                          // Enable smart fractions (with Smartypants)
+	SmartypantsDashes                             // Enable smart dashes (with Smartypants)
+	SmartypantsLatexDashes                        // Enable LaTeX-style dashes (with Smartypants)
+	SmartypantsAngledQuotes                       // Enable angled double quotes (with Smartypants) for double quotes rendering
+	SmartypantsQuotesNBSP                         // Enable « French guillemets » (with Smartypants)
+	TOC                                           // Generate a table of contents
+)
+
+var (
+	htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
+)
+
+const (
+	htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
+		processingInstruction + "|" + declaration + "|" + cdata + ")"
+	closeTag              = "</" + tagName + "\\s*[>]"
+	openTag               = "<" + tagName + attribute + "*" + "\\s*/?>"
+	attribute             = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
+	attributeValue        = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
+	attributeValueSpec    = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
+	attributeName         = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
+	cdata                 = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
+	declaration           = "<![A-Z]+" + "\\s+[^>]*>"
+	doubleQuotedValue     = "\"[^\"]*\""
+	htmlComment           = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
+	processingInstruction = "[<][?].*?[?][>]"
+	singleQuotedValue     = "'[^']*'"
+	tagName               = "[A-Za-z][A-Za-z0-9-]*"
+	unquotedValue         = "[^\"'=<>`\\x00-\\x20]+"
+)
+
+// HTMLRendererParameters is a collection of supplementary parameters tweaking
+// the behavior of various parts of HTML renderer.
+type HTMLRendererParameters struct {
+	// Prepend this text to each relative URL.
+	AbsolutePrefix string
+	// Add this text to each footnote anchor, to ensure uniqueness.
+	FootnoteAnchorPrefix string
+	// Show this text inside the <a> tag for a footnote return link, if the
+	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
+	// <sup>[return]</sup> is used.
+	FootnoteReturnLinkContents string
+	// If set, add this text to the front of each Heading ID, to ensure
+	// uniqueness.
+	HeadingIDPrefix string
+	// If set, add this text to the back of each Heading ID, to ensure uniqueness.
+	HeadingIDSuffix string
+	// Increase heading levels: if the offset is 1, <h1> becomes <h2> etc.
+	// Negative offset is also valid.
+	// Resulting levels are clipped between 1 and 6.
+	HeadingLevelOffset int
+
+	Title string // Document title (used if CompletePage is set)
+	CSS   string // Optional CSS file URL (used if CompletePage is set)
+	Icon  string // Optional icon file URL (used if CompletePage is set)
+
+	Flags HTMLFlags // Flags allow customizing this renderer's behavior
+}
+
+// HTMLRenderer is a type that implements the Renderer interface for HTML output.
+//
+// Do not create this directly, instead use the NewHTMLRenderer function.
+type HTMLRenderer struct {
+	HTMLRendererParameters
+
+	closeTag string // how to end singleton tags: either " />" or ">"
+
+	// Track heading IDs to prevent ID collision in a single generation.
+	headingIDs map[string]int
+
+	lastOutputLen int
+	disableTags   int
+
+	sr *SPRenderer
+}
+
+const (
+	xhtmlClose = " />"
+	htmlClose  = ">"
+)
+
+// NewHTMLRenderer creates and configures an HTMLRenderer object, which
+// satisfies the Renderer interface.
+func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer {
+	// configure the rendering engine
+	closeTag := htmlClose
+	if params.Flags&UseXHTML != 0 {
+		closeTag = xhtmlClose
+	}
+
+	if params.FootnoteReturnLinkContents == "" {
+		// U+FE0E is VARIATION SELECTOR-15.
+		// It suppresses automatic emoji presentation of the preceding
+		// U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS.
+		params.FootnoteReturnLinkContents = "<span aria-label='Return'>↩\ufe0e</span>"
+	}
+
+	return &HTMLRenderer{
+		HTMLRendererParameters: params,
+
+		closeTag:   closeTag,
+		headingIDs: make(map[string]int),
+
+		sr: NewSmartypantsRenderer(params.Flags),
+	}
+}
+
+func isHTMLTag(tag []byte, tagname string) bool {
+	found, _ := findHTMLTagPos(tag, tagname)
+	return found
+}
+
+// Look for a character, but ignore it when it's in any kind of quotes, it
+// might be JavaScript
+func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
+	inSingleQuote := false
+	inDoubleQuote := false
+	inGraveQuote := false
+	i := start
+	for i < len(html) {
+		switch {
+		case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
+			return i
+		case html[i] == '\'':
+			inSingleQuote = !inSingleQuote
+		case html[i] == '"':
+			inDoubleQuote = !inDoubleQuote
+		case html[i] == '`':
+			inGraveQuote = !inGraveQuote
+		}
+		i++
+	}
+	return start
+}
+
+func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
+	i := 0
+	if i < len(tag) && tag[0] != '<' {
+		return false, -1
+	}
+	i++
+	i = skipSpace(tag, i)
+
+	if i < len(tag) && tag[i] == '/' {
+		i++
+	}
+
+	i = skipSpace(tag, i)
+	j := 0
+	for ; i < len(tag); i, j = i+1, j+1 {
+		if j >= len(tagname) {
+			break
+		}
+
+		if strings.ToLower(string(tag[i]))[0] != tagname[j] {
+			return false, -1
+		}
+	}
+
+	if i == len(tag) {
+		return false, -1
+	}
+
+	rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
+	if rightAngle >= i {
+		return true, rightAngle
+	}
+
+	return false, -1
+}
+
+func skipSpace(tag []byte, i int) int {
+	for i < len(tag) && isspace(tag[i]) {
+		i++
+	}
+	return i
+}
+
+func isRelativeLink(link []byte) (yes bool) {
+	// a tag begin with '#'
+	if link[0] == '#' {
+		return true
+	}
+
+	// link begin with '/' but not '//', the second maybe a protocol relative link
+	if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
+		return true
+	}
+
+	// only the root '/'
+	if len(link) == 1 && link[0] == '/' {
+		return true
+	}
+
+	// current directory : begin with "./"
+	if bytes.HasPrefix(link, []byte("./")) {
+		return true
+	}
+
+	// parent directory : begin with "../"
+	if bytes.HasPrefix(link, []byte("../")) {
+		return true
+	}
+
+	return false
+}
+
+func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
+	for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
+		tmp := fmt.Sprintf("%s-%d", id, count+1)
+
+		if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
+			r.headingIDs[id] = count + 1
+			id = tmp
+		} else {
+			id = id + "-1"
+		}
+	}
+
+	if _, found := r.headingIDs[id]; !found {
+		r.headingIDs[id] = 0
+	}
+
+	return id
+}
+
+func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
+	if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
+		newDest := r.AbsolutePrefix
+		if link[0] != '/' {
+			newDest += "/"
+		}
+		newDest += string(link)
+		return []byte(newDest)
+	}
+	return link
+}
+
+func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
+	if isRelativeLink(link) {
+		return attrs
+	}
+	val := []string{}
+	if flags&NofollowLinks != 0 {
+		val = append(val, "nofollow")
+	}
+	if flags&NoreferrerLinks != 0 {
+		val = append(val, "noreferrer")
+	}
+	if flags&NoopenerLinks != 0 {
+		val = append(val, "noopener")
+	}
+	if flags&HrefTargetBlank != 0 {
+		attrs = append(attrs, "target=\"_blank\"")
+	}
+	if len(val) == 0 {
+		return attrs
+	}
+	attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
+	return append(attrs, attr)
+}
+
+func isMailto(link []byte) bool {
+	return bytes.HasPrefix(link, []byte("mailto:"))
+}
+
+func needSkipLink(flags HTMLFlags, dest []byte) bool {
+	if flags&SkipLinks != 0 {
+		return true
+	}
+	return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
+}
+
+func isSmartypantable(node *Node) bool {
+	pt := node.Parent.Type
+	return pt != Link && pt != CodeBlock && pt != Code
+}
+
+func appendLanguageAttr(attrs []string, info []byte) []string {
+	if len(info) == 0 {
+		return attrs
+	}
+	endOfLang := bytes.IndexAny(info, "\t ")
+	if endOfLang < 0 {
+		endOfLang = len(info)
+	}
+	return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
+}
+
+func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
+	w.Write(name)
+	if len(attrs) > 0 {
+		w.Write(spaceBytes)
+		w.Write([]byte(strings.Join(attrs, " ")))
+	}
+	w.Write(gtBytes)
+	r.lastOutputLen = 1
+}
+
+func footnoteRef(prefix string, node *Node) []byte {
+	urlFrag := prefix + string(slugify(node.Destination))
+	anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
+	return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
+}
+
+func footnoteItem(prefix string, slug []byte) []byte {
+	return []byte(fmt.Sprintf(`<li id="fn:%s%s">`, prefix, slug))
+}
+
+func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
+	const format = ` <a class="footnote-return" href="#fnref:%s%s">%s</a>`
+	return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
+}
+
+func itemOpenCR(node *Node) bool {
+	if node.Prev == nil {
+		return false
+	}
+	ld := node.Parent.ListData
+	return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
+}
+
+func skipParagraphTags(node *Node) bool {
+	grandparent := node.Parent.Parent
+	if grandparent == nil || grandparent.Type != List {
+		return false
+	}
+	tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
+	return grandparent.Type == List && tightOrTerm
+}
+
+func cellAlignment(align CellAlignFlags) string {
+	switch align {
+	case TableAlignmentLeft:
+		return "left"
+	case TableAlignmentRight:
+		return "right"
+	case TableAlignmentCenter:
+		return "center"
+	default:
+		return ""
+	}
+}
+
+func (r *HTMLRenderer) out(w io.Writer, text []byte) {
+	if r.disableTags > 0 {
+		w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
+	} else {
+		w.Write(text)
+	}
+	r.lastOutputLen = len(text)
+}
+
+func (r *HTMLRenderer) cr(w io.Writer) {
+	if r.lastOutputLen > 0 {
+		r.out(w, nlBytes)
+	}
+}
+
+var (
+	nlBytes    = []byte{'\n'}
+	gtBytes    = []byte{'>'}
+	spaceBytes = []byte{' '}
+)
+
+var (
+	brTag              = []byte("<br>")
+	brXHTMLTag         = []byte("<br />")
+	emTag              = []byte("<em>")
+	emCloseTag         = []byte("</em>")
+	strongTag          = []byte("<strong>")
+	strongCloseTag     = []byte("</strong>")
+	delTag             = []byte("<del>")
+	delCloseTag        = []byte("</del>")
+	ttTag              = []byte("<tt>")
+	ttCloseTag         = []byte("</tt>")
+	aTag               = []byte("<a")
+	aCloseTag          = []byte("</a>")
+	preTag             = []byte("<pre>")
+	preCloseTag        = []byte("</pre>")
+	codeTag            = []byte("<code>")
+	codeCloseTag       = []byte("</code>")
+	pTag               = []byte("<p>")
+	pCloseTag          = []byte("</p>")
+	blockquoteTag      = []byte("<blockquote>")
+	blockquoteCloseTag = []byte("</blockquote>")
+	hrTag              = []byte("<hr>")
+	hrXHTMLTag         = []byte("<hr />")
+	ulTag              = []byte("<ul>")
+	ulCloseTag         = []byte("</ul>")
+	olTag              = []byte("<ol>")
+	olCloseTag         = []byte("</ol>")
+	dlTag              = []byte("<dl>")
+	dlCloseTag         = []byte("</dl>")
+	liTag              = []byte("<li>")
+	liCloseTag         = []byte("</li>")
+	ddTag              = []byte("<dd>")
+	ddCloseTag         = []byte("</dd>")
+	dtTag              = []byte("<dt>")
+	dtCloseTag         = []byte("</dt>")
+	tableTag           = []byte("<table>")
+	tableCloseTag      = []byte("</table>")
+	tdTag              = []byte("<td")
+	tdCloseTag         = []byte("</td>")
+	thTag              = []byte("<th")
+	thCloseTag         = []byte("</th>")
+	theadTag           = []byte("<thead>")
+	theadCloseTag      = []byte("</thead>")
+	tbodyTag           = []byte("<tbody>")
+	tbodyCloseTag      = []byte("</tbody>")
+	trTag              = []byte("<tr>")
+	trCloseTag         = []byte("</tr>")
+	h1Tag              = []byte("<h1")
+	h1CloseTag         = []byte("</h1>")
+	h2Tag              = []byte("<h2")
+	h2CloseTag         = []byte("</h2>")
+	h3Tag              = []byte("<h3")
+	h3CloseTag         = []byte("</h3>")
+	h4Tag              = []byte("<h4")
+	h4CloseTag         = []byte("</h4>")
+	h5Tag              = []byte("<h5")
+	h5CloseTag         = []byte("</h5>")
+	h6Tag              = []byte("<h6")
+	h6CloseTag         = []byte("</h6>")
+
+	footnotesDivBytes      = []byte("\n<div class=\"footnotes\">\n\n")
+	footnotesCloseDivBytes = []byte("\n</div>\n")
+)
+
+func headingTagsFromLevel(level int) ([]byte, []byte) {
+	if level <= 1 {
+		return h1Tag, h1CloseTag
+	}
+	switch level {
+	case 2:
+		return h2Tag, h2CloseTag
+	case 3:
+		return h3Tag, h3CloseTag
+	case 4:
+		return h4Tag, h4CloseTag
+	case 5:
+		return h5Tag, h5CloseTag
+	}
+	return h6Tag, h6CloseTag
+}
+
+func (r *HTMLRenderer) outHRTag(w io.Writer) {
+	if r.Flags&UseXHTML == 0 {
+		r.out(w, hrTag)
+	} else {
+		r.out(w, hrXHTMLTag)
+	}
+}
+
+// RenderNode is a default renderer of a single node of a syntax tree. For
+// block nodes it will be called twice: first time with entering=true, second
+// time with entering=false, so that it could know when it's working on an open
+// tag and when on close. It writes the result to w.
+//
+// The return value is a way to tell the calling walker to adjust its walk
+// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
+// can ask the walker to skip a subtree of this node by returning SkipChildren.
+// The typical behavior is to return GoToNext, which asks for the usual
+// traversal to the next node.
+func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
+	attrs := []string{}
+	switch node.Type {
+	case Text:
+		if r.Flags&Smartypants != 0 {
+			var tmp bytes.Buffer
+			escapeHTML(&tmp, node.Literal)
+			r.sr.Process(w, tmp.Bytes())
+		} else {
+			if node.Parent.Type == Link {
+				escLink(w, node.Literal)
+			} else {
+				escapeHTML(w, node.Literal)
+			}
+		}
+	case Softbreak:
+		r.cr(w)
+		// TODO: make it configurable via out(renderer.softbreak)
+	case Hardbreak:
+		if r.Flags&UseXHTML == 0 {
+			r.out(w, brTag)
+		} else {
+			r.out(w, brXHTMLTag)
+		}
+		r.cr(w)
+	case Emph:
+		if entering {
+			r.out(w, emTag)
+		} else {
+			r.out(w, emCloseTag)
+		}
+	case Strong:
+		if entering {
+			r.out(w, strongTag)
+		} else {
+			r.out(w, strongCloseTag)
+		}
+	case Del:
+		if entering {
+			r.out(w, delTag)
+		} else {
+			r.out(w, delCloseTag)
+		}
+	case HTMLSpan:
+		if r.Flags&SkipHTML != 0 {
+			break
+		}
+		r.out(w, node.Literal)
+	case Link:
+		// mark it but don't link it if it is not a safe link: no smartypants
+		dest := node.LinkData.Destination
+		if needSkipLink(r.Flags, dest) {
+			if entering {
+				r.out(w, ttTag)
+			} else {
+				r.out(w, ttCloseTag)
+			}
+		} else {
+			if entering {
+				dest = r.addAbsPrefix(dest)
+				var hrefBuf bytes.Buffer
+				hrefBuf.WriteString("href=\"")
+				escLink(&hrefBuf, dest)
+				hrefBuf.WriteByte('"')
+				attrs = append(attrs, hrefBuf.String())
+				if node.NoteID != 0 {
+					r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
+					break
+				}
+				attrs = appendLinkAttrs(attrs, r.Flags, dest)
+				if len(node.LinkData.Title) > 0 {
+					var titleBuff bytes.Buffer
+					titleBuff.WriteString("title=\"")
+					escapeHTML(&titleBuff, node.LinkData.Title)
+					titleBuff.WriteByte('"')
+					attrs = append(attrs, titleBuff.String())
+				}
+				r.tag(w, aTag, attrs)
+			} else {
+				if node.NoteID != 0 {
+					break
+				}
+				r.out(w, aCloseTag)
+			}
+		}
+	case Image:
+		if r.Flags&SkipImages != 0 {
+			return SkipChildren
+		}
+		if entering {
+			dest := node.LinkData.Destination
+			dest = r.addAbsPrefix(dest)
+			if r.disableTags == 0 {
+				//if options.safe && potentiallyUnsafe(dest) {
+				//out(w, `<img src="" alt="`)
+				//} else {
+				r.out(w, []byte(`<img src="`))
+				escLink(w, dest)
+				r.out(w, []byte(`" alt="`))
+				//}
+			}
+			r.disableTags++
+		} else {
+			r.disableTags--
+			if r.disableTags == 0 {
+				if node.LinkData.Title != nil {
+					r.out(w, []byte(`" title="`))
+					escapeHTML(w, node.LinkData.Title)
+				}
+				r.out(w, []byte(`" />`))
+			}
+		}
+	case Code:
+		r.out(w, codeTag)
+		escapeAllHTML(w, node.Literal)
+		r.out(w, codeCloseTag)
+	case Document:
+		break
+	case Paragraph:
+		if skipParagraphTags(node) {
+			break
+		}
+		if entering {
+			// TODO: untangle this clusterfuck about when the newlines need
+			// to be added and when not.
+			if node.Prev != nil {
+				switch node.Prev.Type {
+				case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule:
+					r.cr(w)
+				}
+			}
+			if node.Parent.Type == BlockQuote && node.Prev == nil {
+				r.cr(w)
+			}
+			r.out(w, pTag)
+		} else {
+			r.out(w, pCloseTag)
+			if !(node.Parent.Type == Item && node.Next == nil) {
+				r.cr(w)
+			}
+		}
+	case BlockQuote:
+		if entering {
+			r.cr(w)
+			r.out(w, blockquoteTag)
+		} else {
+			r.out(w, blockquoteCloseTag)
+			r.cr(w)
+		}
+	case HTMLBlock:
+		if r.Flags&SkipHTML != 0 {
+			break
+		}
+		r.cr(w)
+		r.out(w, node.Literal)
+		r.cr(w)
+	case Heading:
+		headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level
+		openTag, closeTag := headingTagsFromLevel(headingLevel)
+		if entering {
+			if node.IsTitleblock {
+				attrs = append(attrs, `class="title"`)
+			}
+			if node.HeadingID != "" {
+				id := r.ensureUniqueHeadingID(node.HeadingID)
+				if r.HeadingIDPrefix != "" {
+					id = r.HeadingIDPrefix + id
+				}
+				if r.HeadingIDSuffix != "" {
+					id = id + r.HeadingIDSuffix
+				}
+				attrs = append(attrs, fmt.Sprintf(`id="%s"`, id))
+			}
+			r.cr(w)
+			r.tag(w, openTag, attrs)
+		} else {
+			r.out(w, closeTag)
+			if !(node.Parent.Type == Item && node.Next == nil) {
+				r.cr(w)
+			}
+		}
+	case HorizontalRule:
+		r.cr(w)
+		r.outHRTag(w)
+		r.cr(w)
+	case List:
+		openTag := ulTag
+		closeTag := ulCloseTag
+		if node.ListFlags&ListTypeOrdered != 0 {
+			openTag = olTag
+			closeTag = olCloseTag
+		}
+		if node.ListFlags&ListTypeDefinition != 0 {
+			openTag = dlTag
+			closeTag = dlCloseTag
+		}
+		if entering {
+			if node.IsFootnotesList {
+				r.out(w, footnotesDivBytes)
+				r.outHRTag(w)
+				r.cr(w)
+			}
+			r.cr(w)
+			if node.Parent.Type == Item && node.Parent.Parent.Tight {
+				r.cr(w)
+			}
+			r.tag(w, openTag[:len(openTag)-1], attrs)
+			r.cr(w)
+		} else {
+			r.out(w, closeTag)
+			//cr(w)
+			//if node.parent.Type != Item {
+			//	cr(w)
+			//}
+			if node.Parent.Type == Item && node.Next != nil {
+				r.cr(w)
+			}
+			if node.Parent.Type == Document || node.Parent.Type == BlockQuote {
+				r.cr(w)
+			}
+			if node.IsFootnotesList {
+				r.out(w, footnotesCloseDivBytes)
+			}
+		}
+	case Item:
+		openTag := liTag
+		closeTag := liCloseTag
+		if node.ListFlags&ListTypeDefinition != 0 {
+			openTag = ddTag
+			closeTag = ddCloseTag
+		}
+		if node.ListFlags&ListTypeTerm != 0 {
+			openTag = dtTag
+			closeTag = dtCloseTag
+		}
+		if entering {
+			if itemOpenCR(node) {
+				r.cr(w)
+			}
+			if node.ListData.RefLink != nil {
+				slug := slugify(node.ListData.RefLink)
+				r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug))
+				break
+			}
+			r.out(w, openTag)
+		} else {
+			if node.ListData.RefLink != nil {
+				slug := slugify(node.ListData.RefLink)
+				if r.Flags&FootnoteReturnLinks != 0 {
+					r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug))
+				}
+			}
+			r.out(w, closeTag)
+			r.cr(w)
+		}
+	case CodeBlock:
+		attrs = appendLanguageAttr(attrs, node.Info)
+		r.cr(w)
+		r.out(w, preTag)
+		r.tag(w, codeTag[:len(codeTag)-1], attrs)
+		escapeAllHTML(w, node.Literal)
+		r.out(w, codeCloseTag)
+		r.out(w, preCloseTag)
+		if node.Parent.Type != Item {
+			r.cr(w)
+		}
+	case Table:
+		if entering {
+			r.cr(w)
+			r.out(w, tableTag)
+		} else {
+			r.out(w, tableCloseTag)
+			r.cr(w)
+		}
+	case TableCell:
+		openTag := tdTag
+		closeTag := tdCloseTag
+		if node.IsHeader {
+			openTag = thTag
+			closeTag = thCloseTag
+		}
+		if entering {
+			align := cellAlignment(node.Align)
+			if align != "" {
+				attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
+			}
+			if node.Prev == nil {
+				r.cr(w)
+			}
+			r.tag(w, openTag, attrs)
+		} else {
+			r.out(w, closeTag)
+			r.cr(w)
+		}
+	case TableHead:
+		if entering {
+			r.cr(w)
+			r.out(w, theadTag)
+		} else {
+			r.out(w, theadCloseTag)
+			r.cr(w)
+		}
+	case TableBody:
+		if entering {
+			r.cr(w)
+			r.out(w, tbodyTag)
+			// XXX: this is to adhere to a rather silly test. Should fix test.
+			if node.FirstChild == nil {
+				r.cr(w)
+			}
+		} else {
+			r.out(w, tbodyCloseTag)
+			r.cr(w)
+		}
+	case TableRow:
+		if entering {
+			r.cr(w)
+			r.out(w, trTag)
+		} else {
+			r.out(w, trCloseTag)
+			r.cr(w)
+		}
+	default:
+		panic("Unknown node type " + node.Type.String())
+	}
+	return GoToNext
+}
+
+// RenderHeader writes HTML document preamble and TOC if requested.
+func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
+	r.writeDocumentHeader(w)
+	if r.Flags&TOC != 0 {
+		r.writeTOC(w, ast)
+	}
+}
+
+// RenderFooter writes HTML document footer.
+func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
+	if r.Flags&CompletePage == 0 {
+		return
+	}
+	io.WriteString(w, "\n</body>\n</html>\n")
+}
+
+func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) {
+	if r.Flags&CompletePage == 0 {
+		return
+	}
+	ending := ""
+	if r.Flags&UseXHTML != 0 {
+		io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
+		io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
+		io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
+		ending = " /"
+	} else {
+		io.WriteString(w, "<!DOCTYPE html>\n")
+		io.WriteString(w, "<html>\n")
+	}
+	io.WriteString(w, "<head>\n")
+	io.WriteString(w, "  <title>")
+	if r.Flags&Smartypants != 0 {
+		r.sr.Process(w, []byte(r.Title))
+	} else {
+		escapeHTML(w, []byte(r.Title))
+	}
+	io.WriteString(w, "</title>\n")
+	io.WriteString(w, "  <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
+	io.WriteString(w, Version)
+	io.WriteString(w, "\"")
+	io.WriteString(w, ending)
+	io.WriteString(w, ">\n")
+	io.WriteString(w, "  <meta charset=\"utf-8\"")
+	io.WriteString(w, ending)
+	io.WriteString(w, ">\n")
+	if r.CSS != "" {
+		io.WriteString(w, "  <link rel=\"stylesheet\" type=\"text/css\" href=\"")
+		escapeHTML(w, []byte(r.CSS))
+		io.WriteString(w, "\"")
+		io.WriteString(w, ending)
+		io.WriteString(w, ">\n")
+	}
+	if r.Icon != "" {
+		io.WriteString(w, "  <link rel=\"icon\" type=\"image/x-icon\" href=\"")
+		escapeHTML(w, []byte(r.Icon))
+		io.WriteString(w, "\"")
+		io.WriteString(w, ending)
+		io.WriteString(w, ">\n")
+	}
+	io.WriteString(w, "</head>\n")
+	io.WriteString(w, "<body>\n\n")
+}
+
+func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) {
+	buf := bytes.Buffer{}
+
+	inHeading := false
+	tocLevel := 0
+	headingCount := 0
+
+	ast.Walk(func(node *Node, entering bool) WalkStatus {
+		if node.Type == Heading && !node.HeadingData.IsTitleblock {
+			inHeading = entering
+			if entering {
+				node.HeadingID = fmt.Sprintf("toc_%d", headingCount)
+				if node.Level == tocLevel {
+					buf.WriteString("</li>\n\n<li>")
+				} else if node.Level < tocLevel {
+					for node.Level < tocLevel {
+						tocLevel--
+						buf.WriteString("</li>\n</ul>")
+					}
+					buf.WriteString("</li>\n\n<li>")
+				} else {
+					for node.Level > tocLevel {
+						tocLevel++
+						buf.WriteString("\n<ul>\n<li>")
+					}
+				}
+
+				fmt.Fprintf(&buf, `<a href="#toc_%d">`, headingCount)
+				headingCount++
+			} else {
+				buf.WriteString("</a>")
+			}
+			return GoToNext
+		}
+
+		if inHeading {
+			return r.RenderNode(&buf, node, entering)
+		}
+
+		return GoToNext
+	})
+
+	for ; tocLevel > 0; tocLevel-- {
+		buf.WriteString("</li>\n</ul>")
+	}
+
+	if buf.Len() > 0 {
+		io.WriteString(w, "<nav>\n")
|
||
+ w.Write(buf.Bytes())
|
||
+ io.WriteString(w, "\n\n</nav>\n")
|
||
+ }
|
||
+ r.lastOutputLen = buf.Len()
|
||
+}
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go
new file mode 100644
index 000000000000..d45bd941726e
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/inline.go
@@ -0,0 +1,1228 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse inline elements.
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"regexp"
+	"strconv"
+)
+
+var (
+	urlRe    = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
+	anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)
+
+	// https://www.w3.org/TR/html5/syntax.html#character-references
+	// highest unicode code point in 17 planes (2^20): 1,114,112d =
+	// 7 dec digits or 6 hex digits
+	// named entity references can be 2-31 characters with stuff like &lt;
+	// at one end and &CounterClockwiseContourIntegral; at the other. There
+	// are also sometimes numbers at the end, although this isn't inherent
+	// in the specification; there are never numbers anywhere else in
+	// current character references, though; see &frac34; and &blk12;, etc.
+	// https://www.w3.org/TR/html5/syntax.html#named-character-references
+	//
+	// entity := "&" (named group | number ref) ";"
+	// named group := [a-zA-Z]{2,31}[0-9]{0,2}
+	// number ref := "#" (dec ref | hex ref)
+	// dec ref := [0-9]{1,7}
+	// hex ref := ("x" | "X") [0-9a-fA-F]{1,6}
+	htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`)
+)
+
+// Functions to parse text within a block
+// Each function returns the number of chars taken care of
+// data is the complete block being rendered
+// offset is the number of valid chars before the current cursor
+
+func (p *Markdown) inline(currBlock *Node, data []byte) {
+	// handlers might call us recursively: enforce a maximum depth
+	if p.nesting >= p.maxNesting || len(data) == 0 {
+		return
+	}
+	p.nesting++
+	beg, end := 0, 0
+	for end < len(data) {
+		handler := p.inlineCallback[data[end]]
+		if handler != nil {
+			if consumed, node := handler(p, data, end); consumed == 0 {
+				// No action from the callback.
+				end++
+			} else {
+				// Copy inactive chars into the output.
+				currBlock.AppendChild(text(data[beg:end]))
+				if node != nil {
+					currBlock.AppendChild(node)
+				}
+				// Skip past whatever the callback used.
+				beg = end + consumed
+				end = beg
+			}
+		} else {
+			end++
+		}
+	}
+	if beg < len(data) {
+		if data[end-1] == '\n' {
+			end--
+		}
+		currBlock.AppendChild(text(data[beg:end]))
+	}
+	p.nesting--
+}
+
+// single and double emphasis parsing
+func emphasis(p *Markdown, data []byte, offset int) (int, *Node) {
+	data = data[offset:]
+	c := data[0]
+
+	if len(data) > 2 && data[1] != c {
+		// whitespace cannot follow an opening emphasis;
+		// strikethrough only takes two characters '~~'
+		if c == '~' || isspace(data[1]) {
+			return 0, nil
+		}
+		ret, node := helperEmphasis(p, data[1:], c)
+		if ret == 0 {
+			return 0, nil
+		}
+
+		return ret + 1, node
+	}
+
+	if len(data) > 3 && data[1] == c && data[2] != c {
+		if isspace(data[2]) {
+			return 0, nil
+		}
+		ret, node := helperDoubleEmphasis(p, data[2:], c)
+		if ret == 0 {
+			return 0, nil
+		}
+
+		return ret + 2, node
+	}
+
+	if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c {
+		if c == '~' || isspace(data[3]) {
+			return 0, nil
+		}
+		ret, node := helperTripleEmphasis(p, data, 3, c)
+		if ret == 0 {
+			return 0, nil
+		}
+
+		return ret + 3, node
+	}
+
+	return 0, nil
+}
+
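+// codeSpan handles '`': a code span is delimited by runs of backticks of
+// equal length, and its contents are emitted verbatim with surrounding
+// spaces trimmed.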
+func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) {
+	data = data[offset:]
+
+	nb := 0
+
+	// count the number of backticks in the delimiter
+	for nb < len(data) && data[nb] == '`' {
+		nb++
+	}
+
+	// find the next delimiter
+	i, end := 0, 0
+	for end = nb; end < len(data) && i < nb; end++ {
+		if data[end] == '`' {
+			i++
+		} else {
+			i = 0
+		}
+	}
+
+	// no matching delimiter?
+	if i < nb && end >= len(data) {
+		return 0, nil
+	}
+
+	// trim outside whitespace
+	fBegin := nb
+	for fBegin < end && data[fBegin] == ' ' {
+		fBegin++
+	}
+
+	fEnd := end - nb
+	for fEnd > fBegin && data[fEnd-1] == ' ' {
+		fEnd--
+	}
+
+	// render the code span
+	if fBegin != fEnd {
+		code := NewNode(Code)
+		code.Literal = data[fBegin:fEnd]
+		return end, code
+	}
+
+	return end, nil
+}
+
+// newline preceded by two spaces becomes <br>
+func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) {
+	origOffset := offset
+	for offset < len(data) && data[offset] == ' ' {
+		offset++
+	}
+
+	if offset < len(data) && data[offset] == '\n' {
+		if offset-origOffset >= 2 {
+			return offset - origOffset + 1, NewNode(Hardbreak)
+		}
+		return offset - origOffset, nil
+	}
+	return 0, nil
+}
+
+// newline without two spaces works when HardLineBreak is enabled
+func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) {
+	if p.extensions&HardLineBreak != 0 {
+		return 1, NewNode(Hardbreak)
+	}
+	return 0, nil
+}
+
+type linkType int
+
+const (
+	linkNormal linkType = iota
+	linkImg
+	linkDeferredFootnote
+	linkInlineFootnote
+)
+
+func isReferenceStyleLink(data []byte, pos int, t linkType) bool {
+	if t == linkDeferredFootnote {
+		return false
+	}
+	return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^'
+}
+
+func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) {
+	if offset < len(data)-1 && data[offset+1] == '[' {
+		return link(p, data, offset)
+	}
+	return 0, nil
+}
+
+func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) {
+	if offset < len(data)-1 && data[offset+1] == '[' {
+		return link(p, data, offset)
+	}
+	return 0, nil
+}
+
+// '[': parse a link or an image or a footnote
+func link(p *Markdown, data []byte, offset int) (int, *Node) {
+	// no links allowed inside regular links, footnote, and deferred footnotes
+	if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') {
+		return 0, nil
+	}
+
+	var t linkType
+	switch {
+	// special case: ![^text] == deferred footnote (that follows something with
+	// an exclamation point)
+	case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^':
+		t = linkDeferredFootnote
+	// ![alt] == image
+	case offset >= 0 && data[offset] == '!':
+		t = linkImg
+		offset++
+	// ^[text] == inline footnote
+	// [^refId] == deferred footnote
+	case p.extensions&Footnotes != 0:
+		if offset >= 0 && data[offset] == '^' {
+			t = linkInlineFootnote
+			offset++
+		} else if len(data)-1 > offset && data[offset+1] == '^' {
+			t = linkDeferredFootnote
+		}
+	// [text] == regular link
+	default:
+		t = linkNormal
+	}
+
+	data = data[offset:]
+
+	var (
+		i                       = 1
+		noteID                  int
+		title, link, altContent []byte
+		textHasNl               = false
+	)
+
+	if t == linkDeferredFootnote {
+		i++
+	}
+
+	// look for the matching closing bracket
+	for level := 1; level > 0 && i < len(data); i++ {
+		switch {
+		case data[i] == '\n':
+			textHasNl = true
+
+		case isBackslashEscaped(data, i):
+			continue
+
+		case data[i] == '[':
+			level++
+
+		case data[i] == ']':
+			level--
+			if level <= 0 {
+				i-- // compensate for extra i++ in for loop
+			}
+		}
+	}
+
+	if i >= len(data) {
+		return 0, nil
+	}
+
+	txtE := i
+	i++
+	var footnoteNode *Node
+
+	// skip any amount of whitespace or newline
+	// (this is much more lax than original markdown syntax)
+	for i < len(data) && isspace(data[i]) {
+		i++
+	}
+
+	// inline style link
+	switch {
+	case i < len(data) && data[i] == '(':
+		// skip initial whitespace
+		i++
+
+		for i < len(data) && isspace(data[i]) {
+			i++
+		}
+
+		linkB := i
+
+		// look for link end: ' " )
+	findlinkend:
+		for i < len(data) {
+			switch {
+			case data[i] == '\\':
+				i += 2
+
+			case data[i] == ')' || data[i] == '\'' || data[i] == '"':
+				break findlinkend
+
+			default:
+				i++
+			}
+		}
+
+		if i >= len(data) {
+			return 0, nil
+		}
+		linkE := i
+
+		// look for title end if present
+		titleB, titleE := 0, 0
+		if data[i] == '\'' || data[i] == '"' {
+			i++
+			titleB = i
+
+		findtitleend:
+			for i < len(data) {
+				switch {
+				case data[i] == '\\':
+					i += 2
+
+				case data[i] == ')':
+					break findtitleend
+
+				default:
+					i++
+				}
+			}
+
+			if i >= len(data) {
+				return 0, nil
+			}
+
+			// skip whitespace after title
+			titleE = i - 1
+			for titleE > titleB && isspace(data[titleE]) {
+				titleE--
+			}
+
+			// check for closing quote presence
+			if data[titleE] != '\'' && data[titleE] != '"' {
+				titleB, titleE = 0, 0
+				linkE = i
+			}
+		}
+
+		// remove whitespace at the end of the link
+		for linkE > linkB && isspace(data[linkE-1]) {
+			linkE--
+		}
+
+		// remove optional angle brackets around the link
+		if data[linkB] == '<' {
+			linkB++
+		}
+		if data[linkE-1] == '>' {
+			linkE--
+		}
+
+		// build escaped link and title
+		if linkE > linkB {
+			link = data[linkB:linkE]
+		}
+
+		if titleE > titleB {
+			title = data[titleB:titleE]
+		}
+
+		i++
+
+	// reference style link
+	case isReferenceStyleLink(data, i, t):
+		var id []byte
+		altContentConsidered := false
+
+		// look for the id
+		i++
+		linkB := i
+		for i < len(data) && data[i] != ']' {
+			i++
+		}
+		if i >= len(data) {
+			return 0, nil
+		}
+		linkE := i
+
+		// find the reference
+		if linkB == linkE {
+			if textHasNl {
+				var b bytes.Buffer
+
+				for j := 1; j < txtE; j++ {
+					switch {
+					case data[j] != '\n':
+						b.WriteByte(data[j])
+					case data[j-1] != ' ':
+						b.WriteByte(' ')
+					}
+				}
+
+				id = b.Bytes()
+			} else {
+				id = data[1:txtE]
+				altContentConsidered = true
+			}
+		} else {
+			id = data[linkB:linkE]
+		}
+
+		// find the reference with matching id
+		lr, ok := p.getRef(string(id))
+		if !ok {
+			return 0, nil
+		}
+
+		// keep link and title from reference
+		link = lr.link
+		title = lr.title
+		if altContentConsidered {
+			altContent = lr.text
+		}
+		i++
+
+	// shortcut reference style link or reference or inline footnote
+	default:
+		var id []byte
+
+		// craft the id
+		if textHasNl {
+			var b bytes.Buffer
+
+			for j := 1; j < txtE; j++ {
+				switch {
+				case data[j] != '\n':
+					b.WriteByte(data[j])
+				case data[j-1] != ' ':
+					b.WriteByte(' ')
+				}
+			}
+
+			id = b.Bytes()
+		} else {
+			if t == linkDeferredFootnote {
+				id = data[2:txtE] // get rid of the ^
+			} else {
+				id = data[1:txtE]
+			}
+		}
+
+		footnoteNode = NewNode(Item)
+		if t == linkInlineFootnote {
+			// create a new reference
+			noteID = len(p.notes) + 1
+
+			var fragment []byte
+			if len(id) > 0 {
+				if len(id) < 16 {
+					fragment = make([]byte, len(id))
+				} else {
+					fragment = make([]byte, 16)
+				}
+				copy(fragment, slugify(id))
+			} else {
+				fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...)
+			}
+
+			ref := &reference{
+				noteID:   noteID,
+				hasBlock: false,
+				link:     fragment,
+				title:    id,
+				footnote: footnoteNode,
+			}
+
+			p.notes = append(p.notes, ref)
+
+			link = ref.link
+			title = ref.title
+		} else {
+			// find the reference with matching id
+			lr, ok := p.getRef(string(id))
+			if !ok {
+				return 0, nil
+			}
+
+			if t == linkDeferredFootnote {
+				lr.noteID = len(p.notes) + 1
+				lr.footnote = footnoteNode
+				p.notes = append(p.notes, lr)
+			}
+
+			// keep link and title from reference
+			link = lr.link
+			// if inline footnote, title == footnote contents
+			title = lr.title
+			noteID = lr.noteID
+		}
+
+		// rewind the whitespace
+		i = txtE + 1
+	}
+
+	var uLink []byte
+	if t == linkNormal || t == linkImg {
+		if len(link) > 0 {
+			var uLinkBuf bytes.Buffer
+			unescapeText(&uLinkBuf, link)
+			uLink = uLinkBuf.Bytes()
+		}
+
+		// links need something to click on and somewhere to go
+		if len(uLink) == 0 || (t == linkNormal && txtE <= 1) {
+			return 0, nil
+		}
+	}
+
+	// call the relevant rendering function
+	var linkNode *Node
+	switch t {
+	case linkNormal:
+		linkNode = NewNode(Link)
+		linkNode.Destination = normalizeURI(uLink)
+		linkNode.Title = title
+		if len(altContent) > 0 {
+			linkNode.AppendChild(text(altContent))
+		} else {
+			// links cannot contain other links, so turn off link parsing
+			// temporarily and recurse
+			insideLink := p.insideLink
+			p.insideLink = true
+			p.inline(linkNode, data[1:txtE])
+			p.insideLink = insideLink
+		}
+
+	case linkImg:
+		linkNode = NewNode(Image)
+		linkNode.Destination = uLink
+		linkNode.Title = title
+		linkNode.AppendChild(text(data[1:txtE]))
+		i++
+
+	case linkInlineFootnote, linkDeferredFootnote:
+		linkNode = NewNode(Link)
+		linkNode.Destination = link
+		linkNode.Title = title
+		linkNode.NoteID = noteID
+		linkNode.Footnote = footnoteNode
+		if t == linkInlineFootnote {
+			i++
+		}
+
+	default:
+		return 0, nil
+	}
+
+	return i, linkNode
+}
+
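+// inlineHTMLComment returns the length of the HTML comment ("<!--" ... "-->")
+// at the start of data, or 0 if data does not begin with a complete comment.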
+func (p *Markdown) inlineHTMLComment(data []byte) int {
+	if len(data) < 5 {
+		return 0
+	}
+	if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' {
+		return 0
+	}
+	i := 5
+	// scan for an end-of-comment marker, across lines if necessary
+	for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') {
+		i++
+	}
+	// no end-of-comment marker
+	if i >= len(data) {
+		return 0
+	}
+	return i + 1
+}
+
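+// stripMailto removes a leading "mailto:" or "mailto://" scheme so only the
+// address itself is used as the link text.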
+func stripMailto(link []byte) []byte {
+	if bytes.HasPrefix(link, []byte("mailto://")) {
+		return link[9:]
+	} else if bytes.HasPrefix(link, []byte("mailto:")) {
+		return link[7:]
+	} else {
+		return link
+	}
+}
+
+// autolinkType specifies a kind of autolink that gets detected.
+type autolinkType int
+
+// These are the possible flag values for the autolink renderer.
+const (
+	notAutolink autolinkType = iota
+	normalAutolink
+	emailAutolink
+)
+
+// '<' when tags or autolinks are allowed
+func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) {
+	data = data[offset:]
+	altype, end := tagLength(data)
+	if size := p.inlineHTMLComment(data); size > 0 {
+		end = size
+	}
+	if end > 2 {
+		if altype != notAutolink {
+			var uLink bytes.Buffer
+			unescapeText(&uLink, data[1:end+1-2])
+			if uLink.Len() > 0 {
+				link := uLink.Bytes()
+				node := NewNode(Link)
+				node.Destination = link
+				if altype == emailAutolink {
+					node.Destination = append([]byte("mailto:"), link...)
+				}
+				node.AppendChild(text(stripMailto(link)))
+				return end, node
+			}
+		} else {
+			htmlTag := NewNode(HTMLSpan)
+			htmlTag.Literal = data[:end]
+			return end, htmlTag
+		}
+	}
+
+	return end, nil
+}
+
+// '\\' backslash escape
+var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")
+
+func escape(p *Markdown, data []byte, offset int) (int, *Node) {
+	data = data[offset:]
+
+	if len(data) > 1 {
+		if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' {
+			return 2, NewNode(Hardbreak)
+		}
+		if bytes.IndexByte(escapeChars, data[1]) < 0 {
+			return 0, nil
+		}
+
+		return 2, text(data[1:2])
+	}
+
+	return 2, nil
+}
+
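+// unescapeText copies src into ob, dropping the backslash from every
+// backslash-escaped character.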
+func unescapeText(ob *bytes.Buffer, src []byte) {
+	i := 0
+	for i < len(src) {
+		org := i
+		for i < len(src) && src[i] != '\\' {
+			i++
+		}
+
+		if i > org {
+			ob.Write(src[org:i])
+		}
+
+		if i+1 >= len(src) {
+			break
+		}
+
+		ob.WriteByte(src[i+1])
+		i += 2
+	}
+}
+
+// '&' escaped when it doesn't belong to an entity
+// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
+func entity(p *Markdown, data []byte, offset int) (int, *Node) {
+	data = data[offset:]
+
+	end := 1
+
+	if end < len(data) && data[end] == '#' {
+		end++
+	}
+
+	for end < len(data) && isalnum(data[end]) {
+		end++
+	}
+
+	if end < len(data) && data[end] == ';' {
+		end++ // real entity
+	} else {
+		return 0, nil // lone '&'
+	}
+
+	ent := data[:end]
+	// undo &amp; escaping or it will be converted to &amp;amp; by another
+	// escaper in the renderer
+	if bytes.Equal(ent, []byte("&amp;")) {
+		ent = []byte{'&'}
+	}
+
+	return end, text(ent)
+}
+
+func linkEndsWithEntity(data []byte, linkEnd int) bool {
+	entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1)
+	return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd
+}
+
+// hasPrefixCaseInsensitive is a custom implementation of
+//	strings.HasPrefix(strings.ToLower(s), prefix)
+// we rolled our own because ToLower pulls in a huge machinery of lowercasing
+// anything from Unicode and that's very slow. Since this func will only be
+// used on ASCII protocol prefixes, we can take shortcuts.
+func hasPrefixCaseInsensitive(s, prefix []byte) bool {
+	if len(s) < len(prefix) {
+		return false
+	}
+	delta := byte('a' - 'A')
+	for i, b := range prefix {
+		if b != s[i] && b != s[i]+delta {
+			return false
+		}
+	}
+	return true
+}
+
+var protocolPrefixes = [][]byte{
+	[]byte("http://"),
+	[]byte("https://"),
+	[]byte("ftp://"),
+	[]byte("file://"),
+	[]byte("mailto:"),
+}
+
+const shortestPrefix = 6 // len("ftp://"), the shortest of the above
+
+func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) {
+	// quick check to rule out most false hits
+	if p.insideLink || len(data) < offset+shortestPrefix {
+		return 0, nil
+	}
+	for _, prefix := range protocolPrefixes {
+		endOfHead := offset + 8 // 8 is the len() of the longest prefix
+		if endOfHead > len(data) {
+			endOfHead = len(data)
+		}
+		if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) {
+			return autoLink(p, data, offset)
+		}
+	}
+	return 0, nil
+}
+
+func autoLink(p *Markdown, data []byte, offset int) (int, *Node) {
+	// Now a more expensive check to see if we're not inside an anchor element
+	anchorStart := offset
+	offsetFromAnchor := 0
+	for anchorStart > 0 && data[anchorStart] != '<' {
+		anchorStart--
+		offsetFromAnchor++
+	}
+
+	anchorStr := anchorRe.Find(data[anchorStart:])
+	if anchorStr != nil {
+		anchorClose := NewNode(HTMLSpan)
+		anchorClose.Literal = anchorStr[offsetFromAnchor:]
+		return len(anchorStr) - offsetFromAnchor, anchorClose
+	}
+
+	// scan backward for a word boundary
+	rewind := 0
+	for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) {
+		rewind++
+	}
+	if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters
+		return 0, nil
+	}
+
+	origData := data
+	data = data[offset-rewind:]
+
+	if !isSafeLink(data) {
+		return 0, nil
+	}
+
+	linkEnd := 0
+	for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) {
+		linkEnd++
+	}
+
+	// Skip punctuation at the end of the link
+	if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' {
+		linkEnd--
+	}
+
+	// But don't skip semicolon if it's a part of escaped entity:
+	if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) {
+		linkEnd--
+	}
+
+	// See if the link finishes with a punctuation sign that can be closed.
+	var copen byte
+	switch data[linkEnd-1] {
+	case '"':
+		copen = '"'
+	case '\'':
+		copen = '\''
+	case ')':
+		copen = '('
+	case ']':
+		copen = '['
+	case '}':
+		copen = '{'
+	default:
+		copen = 0
+	}
+
+	if copen != 0 {
+		bufEnd := offset - rewind + linkEnd - 2
+
+		openDelim := 1
+
+		/* Try to close the final punctuation sign in this same line;
+		 * if we managed to close it outside of the URL, that means that it's
+		 * not part of the URL. If it closes inside the URL, that means it
+		 * is part of the URL.
+		 *
+		 * Examples:
+		 *
+		 *	foo http://www.pokemon.com/Pikachu_(Electric) bar
+		 *		=> http://www.pokemon.com/Pikachu_(Electric)
+		 *
+		 *	foo (http://www.pokemon.com/Pikachu_(Electric)) bar
+		 *		=> http://www.pokemon.com/Pikachu_(Electric)
+		 *
+		 *	foo http://www.pokemon.com/Pikachu_(Electric)) bar
+		 *		=> http://www.pokemon.com/Pikachu_(Electric))
+		 *
+		 *	(foo http://www.pokemon.com/Pikachu_(Electric)) bar
+		 *		=> foo http://www.pokemon.com/Pikachu_(Electric)
+		 */
+
+		for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 {
+			if origData[bufEnd] == data[linkEnd-1] {
+				openDelim++
+			}
+
+			if origData[bufEnd] == copen {
+				openDelim--
+			}
+
+			bufEnd--
+		}
+
+		if openDelim == 0 {
+			linkEnd--
+		}
+	}
+
+	var uLink bytes.Buffer
+	unescapeText(&uLink, data[:linkEnd])
+
+	if uLink.Len() > 0 {
+		node := NewNode(Link)
+		node.Destination = uLink.Bytes()
+		node.AppendChild(text(uLink.Bytes()))
+		return linkEnd, node
+	}
+
+	return linkEnd, nil
+}
+
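+// isEndOfLink reports whether char terminates an autolinked URL.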
+func isEndOfLink(char byte) bool {
+	return isspace(char) || char == '<'
+}
+
+var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}
+var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}
+
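+// isSafeLink reports whether link begins with one of the recognized relative
+// path prefixes or URI schemes above; autolinking only applies to such links.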
+func isSafeLink(link []byte) bool {
+	for _, path := range validPaths {
+		if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) {
+			if len(link) == len(path) {
+				return true
+			} else if isalnum(link[len(path)]) {
+				return true
+			}
+		}
+	}
+
+	for _, prefix := range validUris {
+		// TODO: handle unicode here
+		// case-insensitive prefix test
+		if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// return the length of the given tag, or 0 if it's not valid
+func tagLength(data []byte) (autolink autolinkType, end int) {
+	var i, j int
+
+	// a valid tag can't be shorter than 3 chars
+	if len(data) < 3 {
+		return notAutolink, 0
+	}
+
+	// begins with a '<' optionally followed by '/', followed by letter or number
+	if data[0] != '<' {
+		return notAutolink, 0
+	}
+	if data[1] == '/' {
+		i = 2
+	} else {
+		i = 1
+	}
+
+	if !isalnum(data[i]) {
+		return notAutolink, 0
+	}
+
+	// scheme test
+	autolink = notAutolink
+
+	// try to find the beginning of a URI
+	for i < len(data) && (isalnum(data[i]) || data[i] == '.' || data[i] == '+' || data[i] == '-') {
+		i++
+	}
+
+	if i > 1 && i < len(data) && data[i] == '@' {
+		if j = isMailtoAutoLink(data[i:]); j != 0 {
+			return emailAutolink, i + j
+		}
+	}
+
+	if i > 2 && i < len(data) && data[i] == ':' {
+		autolink = normalAutolink
+		i++
+	}
+
+	// complete autolink test: no whitespace or ' or "
+	switch {
+	case i >= len(data):
+		autolink = notAutolink
+	case autolink != notAutolink:
+		j = i
+
+		for i < len(data) {
+			if data[i] == '\\' {
+				i += 2
+			} else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) {
+				break
+			} else {
+				i++
+			}
+
+		}
+
+		if i >= len(data) {
+			return autolink, 0
+		}
+		if i > j && data[i] == '>' {
+			return autolink, i + 1
+		}
+
+		// one of the forbidden chars has been found
+		autolink = notAutolink
+	}
+	i += bytes.IndexByte(data[i:], '>')
+	if i < 0 {
+		return autolink, 0
+	}
+	return autolink, i + 1
+}
+
+// look for the address part of a mail autolink and '>'
+// this is less strict than the original markdown e-mail address matching
+func isMailtoAutoLink(data []byte) int {
+	nb := 0
+
+	// address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@'
+	for i := 0; i < len(data); i++ {
+		if isalnum(data[i]) {
+			continue
+		}
+
+		switch data[i] {
+		case '@':
+			nb++
+
+		case '-', '.', '_':
+			break
+
+		case '>':
+			if nb == 1 {
+				return i + 1
+			}
+			return 0
+		default:
+			return 0
+		}
+	}
+
+	return 0
+}
+
+// look for the next emph char, skipping other constructs
+func helperFindEmphChar(data []byte, c byte) int {
+	i := 0
+
+	for i < len(data) {
+		for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' {
+			i++
+		}
+		if i >= len(data) {
+			return 0
+		}
+		// do not count escaped chars
+		if i != 0 && data[i-1] == '\\' {
+			i++
+			continue
+		}
+		if data[i] == c {
+			return i
+		}
+
+		if data[i] == '`' {
+			// skip a code span
+			tmpI := 0
+			i++
+			for i < len(data) && data[i] != '`' {
+				if tmpI == 0 && data[i] == c {
+					tmpI = i
+				}
+				i++
+			}
+			if i >= len(data) {
+				return tmpI
+			}
+			i++
+		} else if data[i] == '[' {
+			// skip a link
+			tmpI := 0
+			i++
+			for i < len(data) && data[i] != ']' {
+				if tmpI == 0 && data[i] == c {
+					tmpI = i
+				}
+				i++
+			}
+			i++
+			for i < len(data) && (data[i] == ' ' || data[i] == '\n') {
+				i++
+			}
+			if i >= len(data) {
+				return tmpI
+			}
+			if data[i] != '[' && data[i] != '(' { // not a link
+				if tmpI > 0 {
+					return tmpI
+				}
+				continue
+			}
+			cc := data[i]
+			i++
+			for i < len(data) && data[i] != cc {
+				if tmpI == 0 && data[i] == c {
+					return i
+				}
+				i++
+			}
+			if i >= len(data) {
+				return tmpI
+			}
+			i++
+		}
+	}
+	return 0
+}
+
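+// helperEmphasis parses text delimited by a single emphasis character c,
+// returning the number of bytes consumed and an Emph node, or (0, nil).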
+func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) {
+	i := 0
+
+	// skip one symbol if coming from emph3
+	if len(data) > 1 && data[0] == c && data[1] == c {
+		i = 1
+	}
+
+	for i < len(data) {
+		length := helperFindEmphChar(data[i:], c)
+		if length == 0 {
+			return 0, nil
+		}
+		i += length
+		if i >= len(data) {
+			return 0, nil
+		}
+
+		if i+1 < len(data) && data[i+1] == c {
+			i++
+			continue
+		}
+
+		if data[i] == c && !isspace(data[i-1]) {
+
+			if p.extensions&NoIntraEmphasis != 0 {
+				if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) {
+					continue
+				}
+			}
+
+			emph := NewNode(Emph)
+			p.inline(emph, data[:i])
+			return i + 1, emph
+		}
+	}
+
+	return 0, nil
+}
+
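+// helperDoubleEmphasis is the double-delimiter counterpart of helperEmphasis,
+// producing Strong nodes (or Del when c is '~', i.e. strikethrough).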
+func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) {
+	i := 0
+
+	for i < len(data) {
+		length := helperFindEmphChar(data[i:], c)
+		if length == 0 {
+			return 0, nil
+		}
+		i += length
+
+		if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) {
+			nodeType := Strong
+			if c == '~' {
+				nodeType = Del
+			}
+			node := NewNode(nodeType)
+			p.inline(node, data[:i])
+			return i + 2, node
+		}
+		i++
+	}
+	return 0, nil
+}
+
+func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) {
+	i := 0
+	origData := data
+	data = data[offset:]
+
+	for i < len(data) {
+		length := helperFindEmphChar(data[i:], c)
+		if length == 0 {
+			return 0, nil
+		}
+		i += length
+
+		// skip whitespace-preceded symbols
+		if data[i] != c || isspace(data[i-1]) {
+			continue
+		}
+
+		switch {
+		case i+2 < len(data) && data[i+1] == c && data[i+2] == c:
+			// triple symbol found
+			strong := NewNode(Strong)
+			em := NewNode(Emph)
+			strong.AppendChild(em)
+			p.inline(em, data[:i])
+			return i + 3, strong
+		case (i+1 < len(data) && data[i+1] == c):
+			// double symbol found, hand over to emph1
+			length, node := helperEmphasis(p, origData[offset-2:], c)
+			if length == 0 {
+				return 0, nil
+			}
+			return length - 2, node
+		default:
+			// single symbol found, hand over to emph2
+			length, node := helperDoubleEmphasis(p, origData[offset-1:], c)
+			if length == 0 {
+				return 0, nil
+			}
+			return length - 1, node
+		}
+	}
+	return 0, nil
+}
+
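+// text wraps a byte slice in a new leaf Text node.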
+func text(s []byte) *Node {
+	node := NewNode(Text)
+	node.Literal = s
+	return node
+}
+
+func normalizeURI(s []byte) []byte {
+	return s // TODO: implement
+}
diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go
new file mode 100644
index 000000000000..58d2e4538c62
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/markdown.go
@@ -0,0 +1,950 @@
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+
+package blackfriday
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+	"unicode/utf8"
+)
+
+//
+// Markdown parsing and processing
+//
+
+// Version string of the package. Appears in the rendered document when
+// CompletePage flag is on.
+const Version = "2.0"
+
+// Extensions is a bitwise or'ed collection of enabled Blackfriday's
+// extensions.
+type Extensions int
+
+// These are the supported markdown parsing extensions.
+// OR these values together to select multiple extensions.
+const (
+	NoExtensions           Extensions = 0
+	NoIntraEmphasis        Extensions = 1 << iota // Ignore emphasis markers inside words
+	Tables                                        // Render tables
+	FencedCode                                    // Render fenced code blocks
+	Autolink                                      // Detect embedded URLs that are not explicitly marked
+	Strikethrough                                 // Strikethrough text using ~~test~~
+	LaxHTMLBlocks                                 // Loosen up HTML block parsing rules
+	SpaceHeadings                                 // Be strict about prefix heading rules
+	HardLineBreak                                 // Translate newlines into line breaks
+	TabSizeEight                                  // Expand tabs to eight spaces instead of four
+	Footnotes                                     // Pandoc-style footnotes
+	NoEmptyLineBeforeBlock                        // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
+	HeadingIDs                                    // specify heading IDs with {#id}
+	Titleblock                                    // Titleblock ala pandoc
+	AutoHeadingIDs                                // Create the heading ID from the text
+	BackslashLineBreak                            // Translate trailing backslashes into line breaks
+	DefinitionLists                               // Render definition lists
+
+	CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants |
+		SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes
+
+	CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode |
+		Autolink | Strikethrough | SpaceHeadings | HeadingIDs |
+		BackslashLineBreak | DefinitionLists
+)
+
+// ListType contains bitwise or'ed flags for list and list item objects.
+type ListType int
+
+// These are the possible flag values for the ListItem renderer.
+// Multiple flag values may be ORed together.
+// These are mostly of interest if you are writing a new output format.
+const (
+	ListTypeOrdered ListType = 1 << iota
+	ListTypeDefinition
+	ListTypeTerm
+
+	ListItemContainsBlock
+	ListItemBeginningOfList // TODO: figure out if this is of any use now
+	ListItemEndOfList
+)
+
+// CellAlignFlags holds a type of alignment in a table cell.
+type CellAlignFlags int
+
+// These are the possible flag values for the table cell renderer.
+// Only a single one of these values will be used; they are not ORed together.
+// These are mostly of interest if you are writing a new output format.
+const (
+	TableAlignmentLeft CellAlignFlags = 1 << iota
+	TableAlignmentRight
+	TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight)
+)
+
+// The size of a tab stop.
+const (
+	TabSizeDefault = 4
+	TabSizeDouble  = 8
+)
+
+// blockTags is a set of tags that are recognized as HTML block tags.
+// Any of these can be included in markdown text without special escaping.
+var blockTags = map[string]struct{}{
+	"blockquote": {},
+	"del":        {},
+	"div":        {},
+	"dl":         {},
+	"fieldset":   {},
+	"form":       {},
+	"h1":         {},
+	"h2":         {},
+	"h3":         {},
+	"h4":         {},
+	"h5":         {},
+	"h6":         {},
+	"iframe":     {},
+	"ins":        {},
+	"math":       {},
+	"noscript":   {},
+	"ol":         {},
+	"pre":        {},
+	"p":          {},
+	"script":     {},
+	"style":      {},
+	"table":      {},
+	"ul":         {},
+
+	// HTML5
+	"address":    {},
+	"article":    {},
+	"aside":      {},
+	"canvas":     {},
+	"figcaption": {},
+	"figure":     {},
+	"footer":     {},
+	"header":     {},
+	"hgroup":     {},
+	"main":       {},
+	"nav":        {},
+	"output":     {},
+	"progress":   {},
+	"section":    {},
+	"video":      {},
+}
+
+// Renderer is the rendering interface. This is mostly of interest if you are
+// implementing a new rendering format.
+//
+// Only an HTML implementation is provided in this repository, see the README
+// for external implementations.
+type Renderer interface {
+	// RenderNode is the main rendering method. It will be called once for
+	// every leaf node and twice for every non-leaf node (first with
+	// entering=true, then with entering=false). The method should write its
+	// rendition of the node to the supplied writer w.
+	RenderNode(w io.Writer, node *Node, entering bool) WalkStatus
+
+	// RenderHeader is a method that allows the renderer to produce some
+	// content preceding the main body of the output document. The header is
+	// understood in the broad sense here. For example, the default HTML
+	// renderer will write not only the HTML document preamble, but also the
+	// table of contents if it was requested.
+	//
+	// The method will be passed an entire document tree, in case a particular
+	// implementation needs to inspect it to produce output.
+	//
+	// The output should be written to the supplied writer w. If your
+	// implementation has no header to write, supply an empty implementation.
+	RenderHeader(w io.Writer, ast *Node)
+
+	// RenderFooter is a symmetric counterpart of RenderHeader.
+	RenderFooter(w io.Writer, ast *Node)
+}
+
+// Callback functions for inline parsing. One such function is defined
+// for each character that triggers a response when parsing inline data.
+type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node)
+
+// Markdown is a type that holds extensions and the runtime state used by
+// Parse, and the renderer. You can not use it directly, construct it with New.
+type Markdown struct {
+	renderer          Renderer
+	referenceOverride ReferenceOverrideFunc
+	refs              map[string]*reference
+	inlineCallback    [256]inlineParser
+	extensions        Extensions
+	nesting           int
+	maxNesting        int
+	insideLink        bool
+
+	// Footnotes need to be ordered as well as available to quickly check for
+	// presence. If a ref is also a footnote, it's stored both in refs and here
+	// in notes. Slice is nil if footnotes not enabled.
+	notes []*reference
+
+	doc                  *Node
+	tip                  *Node // = doc
+	oldTip               *Node
+	lastMatchedContainer *Node // = doc
+	allClosed            bool
+}
+
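+// getRef looks up a link reference by id, consulting the optional
+// referenceOverride callback before the references collected from the
+// document itself.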
+func (p *Markdown) getRef(refid string) (ref *reference, found bool) {
+	if p.referenceOverride != nil {
+		r, overridden := p.referenceOverride(refid)
+		if overridden {
+			if r == nil {
+				return nil, false
+			}
+			return &reference{
+				link:     []byte(r.Link),
+				title:    []byte(r.Title),
+				noteID:   0,
+				hasBlock: false,
+				text:     []byte(r.Text)}, true
+		}
+	}
+	// refs are case insensitive
+	ref, found = p.refs[strings.ToLower(refid)]
+	return ref, found
+}
+
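+// finalize marks a block as closed and moves the insertion point (tip) back
+// up to its parent.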
+func (p *Markdown) finalize(block *Node) {
+	above := block.Parent
+	block.open = false
+	p.tip = above
+}
+
+func (p *Markdown) addChild(node NodeType, offset uint32) *Node {
+	return p.addExistingChild(NewNode(node), offset)
+}
+
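+// addExistingChild closes open blocks until one can contain node, then
+// appends node there and makes it the new tip.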
+func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node {
+	for !p.tip.canContain(node.Type) {
+		p.finalize(p.tip)
+	}
+	p.tip.AppendChild(node)
+	p.tip = node
+	return node
+}
+
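+// closeUnmatchedBlocks finalizes any blocks that were left open between the
+// old tip and the last matched container.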
+func (p *Markdown) closeUnmatchedBlocks() {
+	if !p.allClosed {
+		for p.oldTip != p.lastMatchedContainer {
+			parent := p.oldTip.Parent
+			p.finalize(p.oldTip)
+			p.oldTip = parent
+		}
+		p.allClosed = true
+	}
+}
+
+//
+//
+// Public interface
+//
+//
+
+// Reference represents the details of a link.
+// See the documentation in Options for more details on use-case.
+type Reference struct {
+	// Link is usually the URL the reference points to.
+	Link string
+	// Title is the alternate text describing the link in more detail.
+	Title string
+	// Text is the optional text to override the ref with if the syntax used was
+	// [refid][]
+	Text string
+}
+
+// ReferenceOverrideFunc is expected to be called with a reference string and
+// return either a valid Reference type that the reference string maps to or
+// nil. If overridden is false, the default reference logic will be executed.
+// See the documentation in Options for more details on use-case.
+type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)
+
+// New constructs a Markdown processor. You can use the same With* functions as
+// for Run() to customize the parser's behavior and the renderer.
+func New(opts ...Option) *Markdown {
+	var p Markdown
+	for _, opt := range opts {
+		opt(&p)
+	}
+	p.refs = make(map[string]*reference)
+	p.maxNesting = 16
+	p.insideLink = false
+	docNode := NewNode(Document)
+	p.doc = docNode
+	p.tip = docNode
+	p.oldTip = docNode
+	p.lastMatchedContainer = docNode
+	p.allClosed = true
+	// register inline parsers
+	p.inlineCallback[' '] = maybeLineBreak
+	p.inlineCallback['*'] = emphasis
+	p.inlineCallback['_'] = emphasis
+	if p.extensions&Strikethrough != 0 {
+		p.inlineCallback['~'] = emphasis
+	}
+	p.inlineCallback['`'] = codeSpan
+	p.inlineCallback['\n'] = lineBreak
+	p.inlineCallback['['] = link
+	p.inlineCallback['<'] = leftAngle
+	p.inlineCallback['\\'] = escape
+	p.inlineCallback['&'] = entity
+	p.inlineCallback['!'] = maybeImage
+	p.inlineCallback['^'] = maybeInlineFootnote
+	if p.extensions&Autolink != 0 {
+		p.inlineCallback['h'] = maybeAutoLink
+		p.inlineCallback['m'] = maybeAutoLink
+		p.inlineCallback['f'] = maybeAutoLink
+		p.inlineCallback['H'] = maybeAutoLink
+		p.inlineCallback['M'] = maybeAutoLink
+		p.inlineCallback['F'] = maybeAutoLink
+	}
+	if p.extensions&Footnotes != 0 {
+		p.notes = make([]*reference, 0)
+	}
+	return &p
+}
+
+// Option customizes the Markdown processor's default behavior.
+type Option func(*Markdown)
+
+// WithRenderer allows you to override the default renderer.
+func WithRenderer(r Renderer) Option {
+	return func(p *Markdown) {
+		p.renderer = r
+	}
+}
+
+// WithExtensions allows you to pick some of the many extensions provided by
+// Blackfriday. You can bitwise OR them.
+func WithExtensions(e Extensions) Option {
+	return func(p *Markdown) {
+		p.extensions = e
+	}
+}
+
+// WithNoExtensions turns off all extensions and custom behavior.
+func WithNoExtensions() Option {
+	return func(p *Markdown) {
+		p.extensions = NoExtensions
+		p.renderer = NewHTMLRenderer(HTMLRendererParameters{
+			Flags: HTMLFlagsNone,
+		})
+	}
+}
+
+// WithRefOverride sets an optional function callback that is called every
+// time a reference is resolved.
+//
+// In Markdown, the link reference syntax can be made to resolve a link to
+// a reference instead of an inline URL, in one of the following ways:
+//
+//	* [link text][refid]
+//	* [refid][]
+//
+// Usually, the refid is defined at the bottom of the Markdown document. If
+// this override function is provided, the refid is passed to the override
+// function first, before consulting the defined refids at the bottom. If
+// the override function indicates an override did not occur, the refids at
+// the bottom will be used to fill in the link details.
+func WithRefOverride(o ReferenceOverrideFunc) Option {
+	return func(p *Markdown) {
+		p.referenceOverride = o
+	}
+}
+
+// Run is the main entry point to Blackfriday. It parses and renders a
+// block of markdown-encoded text.
+//
+// The simplest invocation of Run takes one argument, input:
+//	output := Run(input)
+// This will parse the input with CommonExtensions enabled and render it with
+// the default HTMLRenderer (with CommonHTMLFlags).
+//
+// Variadic arguments opts can customize the default behavior. Since Markdown
+// type does not contain exported fields, you can not use it directly. Instead,
+// use the With* functions. For example, this will call the most basic
+// functionality, with no extensions:
+//	output := Run(input, WithNoExtensions())
+//
+// You can use any number of With* arguments, even contradicting ones. They
+// will be applied in order of appearance and the latter will override the
+// former:
+//	output := Run(input, WithNoExtensions(), WithExtensions(exts),
+//		WithRenderer(yourRenderer))
+func Run(input []byte, opts ...Option) []byte {
+	r := NewHTMLRenderer(HTMLRendererParameters{
+		Flags: CommonHTMLFlags,
+	})
+	optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)}
+	optList = append(optList, opts...)
+	parser := New(optList...)
+	ast := parser.Parse(input)
+	var buf bytes.Buffer
+	parser.renderer.RenderHeader(&buf, ast)
+	ast.Walk(func(node *Node, entering bool) WalkStatus {
+		return parser.renderer.RenderNode(&buf, node, entering)
+	})
+	parser.renderer.RenderFooter(&buf, ast)
+	return buf.Bytes()
+}
+
+// Parse is an entry point to the parsing part of Blackfriday. It takes an
+// input markdown document and produces a syntax tree for its contents. This
+// tree can then be rendered with a default or custom renderer, or
+// analyzed/transformed by the caller to whatever non-standard needs they have.
+// The return value is the root node of the syntax tree.
+func (p *Markdown) Parse(input []byte) *Node {
+	p.block(input)
+	// Walk the tree and finish up some of the unfinished blocks
+	for p.tip != nil {
+		p.finalize(p.tip)
+	}
+	// Walk the tree again and process inline markdown in each block
+	p.doc.Walk(func(node *Node, entering bool) WalkStatus {
+		if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell {
+			p.inline(node, node.content)
+			node.content = nil
+		}
+		return GoToNext
+	})
+	p.parseRefsToAST()
+	return p.doc
+}
+
+func (p *Markdown) parseRefsToAST() {
+	if p.extensions&Footnotes == 0 || len(p.notes) == 0 {
+		return
+	}
+	p.tip = p.doc
+	block := p.addBlock(List, nil)
+	block.IsFootnotesList = true
+	block.ListFlags = ListTypeOrdered
+	flags := ListItemBeginningOfList
+	// Note: this loop is intentionally explicit, not range-form. This is
+	// because the body of the loop will append nested footnotes to p.notes and
+	// we need to process those late additions. Range form would only walk over
+	// the fixed initial set.
+	for i := 0; i < len(p.notes); i++ {
+		ref := p.notes[i]
+		p.addExistingChild(ref.footnote, 0)
+		block := ref.footnote
+		block.ListFlags = flags | ListTypeOrdered
+		block.RefLink = ref.link
+		if ref.hasBlock {
+			flags |= ListItemContainsBlock
+			p.block(ref.title)
+		} else {
+			p.inline(block, ref.title)
+		}
+		flags &^= ListItemBeginningOfList | ListItemContainsBlock
+	}
+	above := block.Parent
+	finalizeList(block)
+	p.tip = above
+	block.Walk(func(node *Node, entering bool) WalkStatus {
+		if node.Type == Paragraph || node.Type == Heading {
+			p.inline(node, node.content)
+			node.content = nil
+		}
+		return GoToNext
+	})
+}
+
+//
+// Link references
+//
+// This section implements support for references that (usually) appear
+// as footnotes in a document, and can be referenced anywhere in the document.
+// The basic format is:
+//
+//	[1]: http://www.google.com/ "Google"
+//	[2]: http://www.github.com/ "Github"
+//
+// Anywhere in the document, the reference can be linked by referring to its
+// label, i.e., 1 and 2 in this example, as in:
+//
+//	This library is hosted on [Github][2], a git hosting site.
+//
+// Actual footnotes as specified in Pandoc and supported by some other Markdown
+// libraries such as php-markdown are also taken care of. They look like this:
+//
+//	This sentence needs a bit of further explanation.[^note]
+//
+//	[^note]: This is the explanation.
+//
+// Footnotes should be placed at the end of the document in an ordered list.
+// Finally, there are inline footnotes such as:
+//
+//	Inline footnotes^[Also supported.] provide a quick inline explanation,
+//	but are rendered at the bottom of the document.
+//
+// reference holds all information necessary for reference-style links or
+// footnotes.
+//
+// Consider this markdown with reference-style links:
+//
+//	[link][ref]
+//
+//	[ref]: /url/ "tooltip title"
+//
+// It will be ultimately converted to this HTML:
+//
+//	<p><a href=\"/url/\" title=\"title\">link</a></p>
+//
+// And a reference structure will be populated as follows:
+//
+//	p.refs["ref"] = &reference{
+//		link: "/url/",
+//		title: "tooltip title",
+//	}
+//
+// Alternatively, reference can contain information about a footnote. Consider
+// this markdown:
+//
+//	Text needing a footnote.[^a]
+//
+//	[^a]: This is the note
+//
+// A reference structure will be populated as follows:
+//
+//	p.refs["a"] = &reference{
+//		link: "a",
+//		title: "This is the note",
+//		noteID: <some positive int>,
+//	}
+//
+// TODO: As you can see, it begs for splitting into two dedicated structures
+// for refs and for footnotes.
+type reference struct {
+	link     []byte
+	title    []byte
+	noteID   int // 0 if not a footnote ref
+	hasBlock bool
+	footnote *Node // a link to the Item node within a list of footnotes
+
+	text []byte // only gets populated by refOverride feature with Reference.Text
+}
+
+func (r *reference) String() string {
+	return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}",
+		r.link, r.title, r.text, r.noteID, r.hasBlock)
+}
+
+// Check whether or not data starts with a reference link.
+// If so, it is parsed and stored in the list of references
+// (in the render struct).
+// Returns the number of bytes to skip to move past it,
+// or zero if the first line is not a reference.
+func isReference(p *Markdown, data []byte, tabSize int) int {
+	// up to 3 optional leading spaces
+	if len(data) < 4 {
+		return 0
+	}
+	i := 0
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	noteID := 0
+
+	// id part: anything but a newline between brackets
+	if data[i] != '[' {
+		return 0
+	}
+	i++
+	if p.extensions&Footnotes != 0 {
+		if i < len(data) && data[i] == '^' {
+			// we can set it to anything here because the proper noteIds will
+			// be assigned later during the second pass. It just has to be != 0
+			noteID = 1
+			i++
+		}
+	}
+	idOffset := i
+	for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
+		i++
+	}
+	if i >= len(data) || data[i] != ']' {
+		return 0
+	}
+	idEnd := i
+	// footnotes can have empty ID, like this: [^], but a reference can not be
+	// empty like this: []. Break early if it's not a footnote and there's no ID
+	if noteID == 0 && idOffset == idEnd {
+		return 0
+	}
+	// spacer: colon (space | tab)* newline? (space | tab)*
+	i++
+	if i >= len(data) || data[i] != ':' {
+		return 0
+	}
+	i++
+	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+		i++
+	}
+	if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
+		i++
+		if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
+			i++
+		}
+	}
+	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+		i++
+	}
+	if i >= len(data) {
+		return 0
+	}
+
+	var (
+		linkOffset, linkEnd   int
+		titleOffset, titleEnd int
+		lineEnd               int
+		raw                   []byte
+		hasBlock              bool
+	)
+
+	if p.extensions&Footnotes != 0 && noteID != 0 {
+		linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
+		lineEnd = linkEnd
+	} else {
+		linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
+	}
+	if lineEnd == 0 {
+		return 0
+	}
+
+	// a valid ref has been found
+
+	ref := &reference{
+		noteID:   noteID,
+		hasBlock: hasBlock,
+	}
+
+	if noteID > 0 {
+		// reusing the link field for the id since footnotes don't have links
+		ref.link = data[idOffset:idEnd]
+		// if footnote, it's not really a title, it's the contained text
+		ref.title = raw
+	} else {
+		ref.link = data[linkOffset:linkEnd]
+		ref.title = data[titleOffset:titleEnd]
+	}
+
+	// id matches are case-insensitive
+	id := string(bytes.ToLower(data[idOffset:idEnd]))
+
+	p.refs[id] = ref
+
+	return lineEnd
+}
+
+func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
+	// link: whitespace-free sequence, optionally between angle brackets
+	if data[i] == '<' {
+		i++
+	}
+	linkOffset = i
+	for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
+		i++
+	}
+	linkEnd = i
+	if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
+		linkOffset++
+		linkEnd--
+	}
+
+	// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
+	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+		i++
+	}
+	if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
+		return
+	}
+
+	// compute end-of-line
+	if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
+		lineEnd = i
+	}
+	if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
+		lineEnd++
+	}
+
+	// optional (space|tab)* spacer after a newline
+	if lineEnd > 0 {
+		i = lineEnd + 1
+		for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+			i++
+		}
+	}
+
+	// optional title: any non-newline sequence enclosed in '"() alone on its line
+	if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
+		i++
+		titleOffset = i
+
+		// look for EOL
+		for i < len(data) && data[i] != '\n' && data[i] != '\r' {
+			i++
+		}
+		if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
+			titleEnd = i + 1
+		} else {
+			titleEnd = i
+		}
+
+		// step back
+		i--
+		for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
|
||
+ i--
|
||
+ }
|
||
+ if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
|
||
+ lineEnd = titleEnd
|
||
+ titleEnd = i
|
||
+ }
|
||
+ }
|
||
+
|
||
+ return
|
||
+}
|
||
+
|
||
+// The first bit of this logic is the same as Parser.listItem, but the rest
|
||
+// is much simpler. This function simply finds the entire block and shifts it
|
||
+// over by one tab if it is indeed a block (just returns the line if it's not).
|
||
+// blockEnd is the end of the section in the input buffer, and contents is the
|
||
+// extracted text that was shifted over one tab. It will need to be rendered at
|
||
+// the end of the document.
|
||
+func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
|
||
+ if i == 0 || len(data) == 0 {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ // skip leading whitespace on first line
|
||
+ for i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ blockStart = i
|
||
+
|
||
+ // find the end of the line
|
||
+ blockEnd = i
|
||
+ for i < len(data) && data[i-1] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // get working buffer
|
||
+ var raw bytes.Buffer
|
||
+
|
||
+ // put the first line into the working buffer
|
||
+ raw.Write(data[blockEnd:i])
|
||
+ blockEnd = i
|
||
+
|
||
+ // process the following lines
|
||
+ containsBlankLine := false
|
||
+
|
||
+gatherLines:
|
||
+ for blockEnd < len(data) {
|
||
+ i++
|
||
+
|
||
+ // find the end of this line
|
||
+ for i < len(data) && data[i-1] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // if it is an empty line, guess that it is part of this item
|
||
+ // and move on to the next line
|
||
+ if p.isEmpty(data[blockEnd:i]) > 0 {
|
||
+ containsBlankLine = true
|
||
+ blockEnd = i
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ n := 0
|
||
+ if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
|
||
+ // this is the end of the block.
|
||
+ // we don't want to include this last line in the index.
|
||
+ break gatherLines
|
||
+ }
|
||
+
|
||
+ // if there were blank lines before this one, insert a new one now
|
||
+ if containsBlankLine {
|
||
+ raw.WriteByte('\n')
|
||
+ containsBlankLine = false
|
||
+ }
|
||
+
|
||
+ // get rid of that first tab, write to buffer
|
||
+ raw.Write(data[blockEnd+n : i])
|
||
+ hasBlock = true
|
||
+
|
||
+ blockEnd = i
|
||
+ }
|
||
+
|
||
+ if data[blockEnd-1] != '\n' {
|
||
+ raw.WriteByte('\n')
|
||
+ }
|
||
+
|
||
+ contents = raw.Bytes()
|
||
+
|
||
+ return
|
||
+}
|
||
+
|
||
+//
|
||
+//
|
||
+// Miscellaneous helper functions
|
||
+//
|
||
+//
|
||
+
|
||
+// Test if a character is a punctuation symbol.
|
||
+// Taken from a private function in regexp in the stdlib.
|
||
+func ispunct(c byte) bool {
|
||
+ for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
|
||
+ if c == r {
|
||
+ return true
|
||
+ }
|
||
+ }
|
||
+ return false
|
||
+}
|
||
+
|
||
+// Test if a character is a whitespace character.
|
||
+func isspace(c byte) bool {
|
||
+ return ishorizontalspace(c) || isverticalspace(c)
|
||
+}
|
||
+
|
||
+// Test if a character is a horizontal whitespace character.
|
||
+func ishorizontalspace(c byte) bool {
|
||
+ return c == ' ' || c == '\t'
|
||
+}
|
||
+
|
||
+// Test if a character is a vertical character.
|
||
+func isverticalspace(c byte) bool {
|
||
+ return c == '\n' || c == '\r' || c == '\f' || c == '\v'
|
||
+}
|
||
+
|
||
+// Test if a character is letter.
|
||
+func isletter(c byte) bool {
|
||
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
|
||
+}
|
||
+
|
||
+// Test if a character is a letter or a digit.
|
||
+// TODO: check when this is looking for ASCII alnum and when it should use unicode
|
||
+func isalnum(c byte) bool {
|
||
+ return (c >= '0' && c <= '9') || isletter(c)
|
||
+}
|
||
+
|
||
+// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
|
||
+// always ends output with a newline
|
||
+func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
|
||
+ // first, check for common cases: no tabs, or only tabs at beginning of line
|
||
+ i, prefix := 0, 0
|
||
+ slowcase := false
|
||
+ for i = 0; i < len(line); i++ {
|
||
+ if line[i] == '\t' {
|
||
+ if prefix == i {
|
||
+ prefix++
|
||
+ } else {
|
||
+ slowcase = true
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // no need to decode runes if all tabs are at the beginning of the line
|
||
+ if !slowcase {
|
||
+ for i = 0; i < prefix*tabSize; i++ {
|
||
+ out.WriteByte(' ')
|
||
+ }
|
||
+ out.Write(line[prefix:])
|
||
+ return
|
||
+ }
|
||
+
|
||
+ // the slow case: we need to count runes to figure out how
|
||
+ // many spaces to insert for each tab
|
||
+ column := 0
|
||
+ i = 0
|
||
+ for i < len(line) {
|
||
+ start := i
|
||
+ for i < len(line) && line[i] != '\t' {
|
||
+ _, size := utf8.DecodeRune(line[i:])
|
||
+ i += size
|
||
+ column++
|
||
+ }
|
||
+
|
||
+ if i > start {
|
||
+ out.Write(line[start:i])
|
||
+ }
|
||
+
|
||
+ if i >= len(line) {
|
||
+ break
|
||
+ }
|
||
+
|
||
+ for {
|
||
+ out.WriteByte(' ')
|
||
+ column++
|
||
+ if column%tabSize == 0 {
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+
|
||
+ i++
|
||
+ }
|
||
+}
|
||
+
|
||
+// Find if a line counts as indented or not.
|
||
+// Returns number of characters the indent is (0 = not indented).
|
||
+func isIndented(data []byte, indentSize int) int {
|
||
+ if len(data) == 0 {
|
||
+ return 0
|
||
+ }
|
||
+ if data[0] == '\t' {
|
||
+ return 1
|
||
+ }
|
||
+ if len(data) < indentSize {
|
||
+ return 0
|
||
+ }
|
||
+ for i := 0; i < indentSize; i++ {
|
||
+ if data[i] != ' ' {
|
||
+ return 0
|
||
+ }
|
||
+ }
|
||
+ return indentSize
|
||
+}
|
||
+
|
||
+// Create a url-safe slug for fragments
|
||
+func slugify(in []byte) []byte {
|
||
+ if len(in) == 0 {
|
||
+ return in
|
||
+ }
|
||
+ out := make([]byte, 0, len(in))
|
||
+ sym := false
|
||
+
|
||
+ for _, ch := range in {
|
||
+ if isalnum(ch) {
|
||
+ sym = false
|
||
+ out = append(out, ch)
|
||
+ } else if sym {
|
||
+ continue
|
||
+ } else {
|
||
+ out = append(out, '-')
|
||
+ sym = true
|
||
+ }
|
||
+ }
|
||
+ var a, b int
|
||
+ var ch byte
|
||
+ for a, ch = range out {
|
||
+ if ch != '-' {
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+ for b = len(out) - 1; b > 0; b-- {
|
||
+ if out[b] != '-' {
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+ return out[a : b+1]
|
||
+}
|
||
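// A minimal, hypothetical usage sketch of the reference-link handling that the
// markdown.go hunk above documents (an editorial illustration, not part of the
// patch; it assumes only blackfriday/v2's exported Run function). During the
// first pass isReference records [ref] in p.refs; the renderer then resolves
// [link][ref] against it.
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte("[link][ref]\n\n[ref]: /url/ \"tooltip title\"\n")
	// Expect roughly: <p><a href="/url/" title="tooltip title">link</a></p>
	fmt.Println(string(blackfriday.Run(input)))
}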
diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go
new file mode 100644
index 000000000000..04e6050ceeae
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/node.go
@@ -0,0 +1,360 @@
+package blackfriday
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// NodeType specifies a type of a single node of a syntax tree. Usually one
+// node (and its type) corresponds to a single markdown feature, e.g. emphasis
+// or code block.
+type NodeType int
+
+// Constants for identifying different types of nodes. See NodeType.
+const (
+	Document NodeType = iota
+	BlockQuote
+	List
+	Item
+	Paragraph
+	Heading
+	HorizontalRule
+	Emph
+	Strong
+	Del
+	Link
+	Image
+	Text
+	HTMLBlock
+	CodeBlock
+	Softbreak
+	Hardbreak
+	Code
+	HTMLSpan
+	Table
+	TableCell
+	TableHead
+	TableBody
+	TableRow
+)
+
+var nodeTypeNames = []string{
+	Document:       "Document",
+	BlockQuote:     "BlockQuote",
+	List:           "List",
+	Item:           "Item",
+	Paragraph:      "Paragraph",
+	Heading:        "Heading",
+	HorizontalRule: "HorizontalRule",
+	Emph:           "Emph",
+	Strong:         "Strong",
+	Del:            "Del",
+	Link:           "Link",
+	Image:          "Image",
+	Text:           "Text",
+	HTMLBlock:      "HTMLBlock",
+	CodeBlock:      "CodeBlock",
+	Softbreak:      "Softbreak",
+	Hardbreak:      "Hardbreak",
+	Code:           "Code",
+	HTMLSpan:       "HTMLSpan",
+	Table:          "Table",
+	TableCell:      "TableCell",
+	TableHead:      "TableHead",
+	TableBody:      "TableBody",
+	TableRow:       "TableRow",
+}
+
+func (t NodeType) String() string {
+	return nodeTypeNames[t]
+}
+
+// ListData contains fields relevant to a List and Item node type.
+type ListData struct {
+	ListFlags       ListType
+	Tight           bool   // Skip <p>s around list item data if true
+	BulletChar      byte   // '*', '+' or '-' in bullet lists
+	Delimiter       byte   // '.' or ')' after the number in ordered lists
+	RefLink         []byte // If not nil, turns this list item into a footnote item and triggers different rendering
+	IsFootnotesList bool   // This is a list of footnotes
+}
+
+// LinkData contains fields relevant to a Link node type.
+type LinkData struct {
+	Destination []byte // Destination is what goes into a href
+	Title       []byte // Title is the tooltip thing that goes in a title attribute
+	NoteID      int    // NoteID contains a serial number of a footnote, zero if it's not a footnote
+	Footnote    *Node  // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil.
+}
+
+// CodeBlockData contains fields relevant to a CodeBlock node type.
+type CodeBlockData struct {
+	IsFenced    bool   // Specifies whether it's a fenced code block or an indented one
+	Info        []byte // This holds the info string
+	FenceChar   byte
+	FenceLength int
+	FenceOffset int
+}
+
+// TableCellData contains fields relevant to a TableCell node type.
+type TableCellData struct {
+	IsHeader bool           // This tells if it's under the header row
+	Align    CellAlignFlags // This holds the value for align attribute
+}
+
+// HeadingData contains fields relevant to a Heading node type.
+type HeadingData struct {
+	Level        int    // This holds the heading level number
+	HeadingID    string // This might hold heading ID, if present
+	IsTitleblock bool   // Specifies whether it's a title block
+}
+
+// Node is a single element in the abstract syntax tree of the parsed document.
+// It holds connections to the structurally neighboring nodes and, for certain
+// types of nodes, additional information that might be needed when rendering.
+type Node struct {
+	Type       NodeType // Determines the type of the node
+	Parent     *Node    // Points to the parent
+	FirstChild *Node    // Points to the first child, if any
+	LastChild  *Node    // Points to the last child, if any
+	Prev       *Node    // Previous sibling; nil if it's the first child
+	Next       *Node    // Next sibling; nil if it's the last child
+
+	Literal []byte // Text contents of the leaf nodes
+
+	HeadingData   // Populated if Type is Heading
+	ListData      // Populated if Type is List
+	CodeBlockData // Populated if Type is CodeBlock
+	LinkData      // Populated if Type is Link
+	TableCellData // Populated if Type is TableCell
+
+	content []byte // Markdown content of the block nodes
+	open    bool   // Specifies an open block node that has not been finished to process yet
+}
+
+// NewNode allocates a node of a specified type.
+func NewNode(typ NodeType) *Node {
+	return &Node{
+		Type: typ,
+		open: true,
+	}
+}
+
+func (n *Node) String() string {
+	ellipsis := ""
+	snippet := n.Literal
+	if len(snippet) > 16 {
+		snippet = snippet[:16]
+		ellipsis = "..."
+	}
+	return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis)
+}
+
+// Unlink removes node 'n' from the tree.
+// It panics if the node is nil.
+func (n *Node) Unlink() {
+	if n.Prev != nil {
+		n.Prev.Next = n.Next
+	} else if n.Parent != nil {
+		n.Parent.FirstChild = n.Next
+	}
+	if n.Next != nil {
+		n.Next.Prev = n.Prev
+	} else if n.Parent != nil {
+		n.Parent.LastChild = n.Prev
+	}
+	n.Parent = nil
+	n.Next = nil
+	n.Prev = nil
+}
+
+// AppendChild adds a node 'child' as a child of 'n'.
+// It panics if either node is nil.
+func (n *Node) AppendChild(child *Node) {
+	child.Unlink()
+	child.Parent = n
+	if n.LastChild != nil {
+		n.LastChild.Next = child
+		child.Prev = n.LastChild
+		n.LastChild = child
+	} else {
+		n.FirstChild = child
+		n.LastChild = child
+	}
+}
+
+// InsertBefore inserts 'sibling' immediately before 'n'.
+// It panics if either node is nil.
+func (n *Node) InsertBefore(sibling *Node) {
+	sibling.Unlink()
+	sibling.Prev = n.Prev
+	if sibling.Prev != nil {
+		sibling.Prev.Next = sibling
+	}
+	sibling.Next = n
+	n.Prev = sibling
+	sibling.Parent = n.Parent
+	if sibling.Prev == nil {
+		sibling.Parent.FirstChild = sibling
+	}
+}
+
+// IsContainer returns true if 'n' can contain children.
+func (n *Node) IsContainer() bool {
+	switch n.Type {
+	case Document:
+		fallthrough
+	case BlockQuote:
+		fallthrough
+	case List:
+		fallthrough
+	case Item:
+		fallthrough
+	case Paragraph:
+		fallthrough
+	case Heading:
+		fallthrough
+	case Emph:
+		fallthrough
+	case Strong:
+		fallthrough
+	case Del:
+		fallthrough
+	case Link:
+		fallthrough
+	case Image:
+		fallthrough
+	case Table:
+		fallthrough
+	case TableHead:
+		fallthrough
+	case TableBody:
+		fallthrough
+	case TableRow:
+		fallthrough
+	case TableCell:
+		return true
+	default:
+		return false
+	}
+}
+
+// IsLeaf returns true if 'n' is a leaf node.
+func (n *Node) IsLeaf() bool {
+	return !n.IsContainer()
+}
+
+func (n *Node) canContain(t NodeType) bool {
+	if n.Type == List {
+		return t == Item
+	}
+	if n.Type == Document || n.Type == BlockQuote || n.Type == Item {
+		return t != Item
+	}
+	if n.Type == Table {
+		return t == TableHead || t == TableBody
+	}
+	if n.Type == TableHead || n.Type == TableBody {
+		return t == TableRow
+	}
+	if n.Type == TableRow {
+		return t == TableCell
+	}
+	return false
+}
+
+// WalkStatus allows NodeVisitor to have some control over the tree traversal.
+// It is returned from NodeVisitor and different values allow Node.Walk to
+// decide which node to go to next.
+type WalkStatus int
+
+const (
+	// GoToNext is the default traversal of every node.
+	GoToNext WalkStatus = iota
+	// SkipChildren tells walker to skip all children of current node.
+	SkipChildren
+	// Terminate tells walker to terminate the traversal.
+	Terminate
+)
+
+// NodeVisitor is a callback to be called when traversing the syntax tree.
+// Called twice for every node: once with entering=true when the branch is
+// first visited, then with entering=false after all the children are done.
+type NodeVisitor func(node *Node, entering bool) WalkStatus
+
+// Walk is a convenience method that instantiates a walker and starts a
+// traversal of subtree rooted at n.
+func (n *Node) Walk(visitor NodeVisitor) {
+	w := newNodeWalker(n)
+	for w.current != nil {
+		status := visitor(w.current, w.entering)
+		switch status {
+		case GoToNext:
+			w.next()
+		case SkipChildren:
+			w.entering = false
+			w.next()
+		case Terminate:
+			return
+		}
+	}
+}
+
+type nodeWalker struct {
+	current  *Node
+	root     *Node
+	entering bool
+}
+
+func newNodeWalker(root *Node) *nodeWalker {
+	return &nodeWalker{
+		current:  root,
+		root:     root,
+		entering: true,
+	}
+}
+
+func (nw *nodeWalker) next() {
+	if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root {
+		nw.current = nil
+		return
+	}
+	if nw.entering && nw.current.IsContainer() {
+		if nw.current.FirstChild != nil {
+			nw.current = nw.current.FirstChild
+			nw.entering = true
+		} else {
+			nw.entering = false
+		}
+	} else if nw.current.Next == nil {
+		nw.current = nw.current.Parent
+		nw.entering = false
+	} else {
+		nw.current = nw.current.Next
+		nw.entering = true
+	}
+}
+
+func dump(ast *Node) {
+	fmt.Println(dumpString(ast))
+}
+
+func dumpR(ast *Node, depth int) string {
+	if ast == nil {
+		return ""
+	}
+	indent := bytes.Repeat([]byte("\t"), depth)
+	content := ast.Literal
+	if content == nil {
+		content = ast.content
+	}
+	result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content)
+	for n := ast.FirstChild; n != nil; n = n.Next {
+		result += dumpR(n, depth+1)
+	}
+	return result
+}
+
+func dumpString(ast *Node) string {
+	return dumpR(ast, 0)
+}
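// A minimal sketch of traversing the AST with Node.Walk and the NodeVisitor
// callback defined in the node.go hunk above (an editorial illustration, not
// part of the patch; the input document is hypothetical).
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	root := md.Parse([]byte("# Title\n\nSome *emphasised* text.\n"))
	root.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		// Every node is visited twice; act only when entering leaf Text nodes.
		if entering && node.Type == blackfriday.Text {
			fmt.Printf("%s: %q\n", node.Type, node.Literal)
		}
		return blackfriday.GoToNext
	})
}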
diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go
new file mode 100644
index 000000000000..3a220e94247d
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/smartypants.go
@@ -0,0 +1,457 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// SmartyPants rendering
+//
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"io"
+)
+
+// SPRenderer is a struct containing state of a Smartypants renderer.
+type SPRenderer struct {
+	inSingleQuote bool
+	inDoubleQuote bool
+	callbacks     [256]smartCallback
+}
+
+func wordBoundary(c byte) bool {
+	return c == 0 || isspace(c) || ispunct(c)
+}
+
+func tolower(c byte) byte {
+	if c >= 'A' && c <= 'Z' {
+		return c - 'A' + 'a'
+	}
+	return c
+}
+
+func isdigit(c byte) bool {
+	return c >= '0' && c <= '9'
+}
+
+func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool {
+	// edge of the buffer is likely to be a tag that we don't get to see,
+	// so we treat it like text sometimes
+
+	// enumerate all sixteen possibilities for (previousChar, nextChar)
+	// each can be one of {0, space, punct, other}
+	switch {
+	case previousChar == 0 && nextChar == 0:
+		// context is not any help here, so toggle
+		*isOpen = !*isOpen
+	case isspace(previousChar) && nextChar == 0:
+		// [ "] might be [ "<code>foo...]
+		*isOpen = true
+	case ispunct(previousChar) && nextChar == 0:
+		// [!"] hmm... could be [Run!"] or [("<code>...]
+		*isOpen = false
+	case /* isnormal(previousChar) && */ nextChar == 0:
+		// [a"] is probably a close
+		*isOpen = false
+	case previousChar == 0 && isspace(nextChar):
+		// [" ] might be [...foo</code>" ]
+		*isOpen = false
+	case isspace(previousChar) && isspace(nextChar):
+		// [ " ] context is not any help here, so toggle
+		*isOpen = !*isOpen
+	case ispunct(previousChar) && isspace(nextChar):
+		// [!" ] is probably a close
+		*isOpen = false
+	case /* isnormal(previousChar) && */ isspace(nextChar):
+		// [a" ] this is one of the easy cases
+		*isOpen = false
+	case previousChar == 0 && ispunct(nextChar):
+		// ["!] hmm... could be ["$1.95] or [</code>"!...]
+		*isOpen = false
+	case isspace(previousChar) && ispunct(nextChar):
+		// [ "!] looks more like [ "$1.95]
+		*isOpen = true
+	case ispunct(previousChar) && ispunct(nextChar):
+		// [!"!] context is not any help here, so toggle
+		*isOpen = !*isOpen
+	case /* isnormal(previousChar) && */ ispunct(nextChar):
+		// [a"!] is probably a close
+		*isOpen = false
+	case previousChar == 0 /* && isnormal(nextChar) */ :
+		// ["a] is probably an open
+		*isOpen = true
+	case isspace(previousChar) /* && isnormal(nextChar) */ :
+		// [ "a] this is one of the easy cases
+		*isOpen = true
+	case ispunct(previousChar) /* && isnormal(nextChar) */ :
+		// [!"a] is probably an open
+		*isOpen = true
+	default:
+		// [a'b] maybe a contraction?
+		*isOpen = false
+	}
+
+	// Note that with the limited lookahead, this non-breaking
+	// space will also be appended to single double quotes.
+	if addNBSP && !*isOpen {
+		out.WriteString("&nbsp;")
+	}
+
+	out.WriteByte('&')
+	if *isOpen {
+		out.WriteByte('l')
+	} else {
+		out.WriteByte('r')
+	}
+	out.WriteByte(quote)
+	out.WriteString("quo;")
+
+	if addNBSP && *isOpen {
+		out.WriteString("&nbsp;")
+	}
+
+	return true
+}
+
+func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 {
+		t1 := tolower(text[1])
+
+		if t1 == '\'' {
+			nextChar := byte(0)
+			if len(text) >= 3 {
+				nextChar = text[2]
+			}
+			if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
+				return 1
+			}
+		}
+
+		if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
+			out.WriteString("&rsquo;")
+			return 0
+		}
+
+		if len(text) >= 3 {
+			t2 := tolower(text[2])
+
+			if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
+				(len(text) < 4 || wordBoundary(text[3])) {
+				out.WriteString("&rsquo;")
+				return 0
+			}
+		}
+	}
+
+	nextChar := byte(0)
+	if len(text) > 1 {
+		nextChar = text[1]
+	}
+	if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) {
+		return 0
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 {
+		t1 := tolower(text[1])
+		t2 := tolower(text[2])
+
+		if t1 == 'c' && t2 == ')' {
+			out.WriteString("&copy;")
+			return 2
+		}
+
+		if t1 == 'r' && t2 == ')' {
+			out.WriteString("&reg;")
+			return 2
+		}
+
+		if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
+			out.WriteString("&trade;")
+			return 3
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 {
+		if text[1] == '-' {
+			out.WriteString("&mdash;")
+			return 1
+		}
+
+		if wordBoundary(previousChar) && wordBoundary(text[1]) {
+			out.WriteString("&ndash;")
+			return 0
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
+		out.WriteString("&mdash;")
+		return 2
+	}
+	if len(text) >= 2 && text[1] == '-' {
+		out.WriteString("&ndash;")
+		return 1
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int {
+	if bytes.HasPrefix(text, []byte("&quot;")) {
+		nextChar := byte(0)
+		if len(text) >= 7 {
+			nextChar = text[6]
+		}
+		if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) {
+			return 5
+		}
+	}
+
+	if bytes.HasPrefix(text, []byte("&#0;")) {
+		return 3
+	}
+
+	out.WriteByte('&')
+	return 0
+}
+
+func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int {
+	var quote byte = 'd'
+	if angledQuotes {
+		quote = 'a'
+	}
+
+	return func(out *bytes.Buffer, previousChar byte, text []byte) int {
+		return r.smartAmpVariant(out, previousChar, text, quote, addNBSP)
+	}
+}
+
+func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
+		out.WriteString("&hellip;")
+		return 2
+	}
+
+	if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
+		out.WriteString("&hellip;")
+		return 4
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 && text[1] == '`' {
+		nextChar := byte(0)
+		if len(text) >= 3 {
+			nextChar = text[2]
+		}
+		if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
+			return 1
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
+		// is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
+		// note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
+		// and avoid changing dates like 1/23/2005 into fractions.
+		numEnd := 0
+		for len(text) > numEnd && isdigit(text[numEnd]) {
+			numEnd++
+		}
+		if numEnd == 0 {
+			out.WriteByte(text[0])
+			return 0
+		}
+		denStart := numEnd + 1
+		if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
+			denStart = numEnd + 3
+		} else if len(text) < numEnd+2 || text[numEnd] != '/' {
+			out.WriteByte(text[0])
+			return 0
+		}
+		denEnd := denStart
+		for len(text) > denEnd && isdigit(text[denEnd]) {
+			denEnd++
+		}
+		if denEnd == denStart {
+			out.WriteByte(text[0])
+			return 0
+		}
+		if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
+			out.WriteString("<sup>")
+			out.Write(text[:numEnd])
+			out.WriteString("</sup>&frasl;<sub>")
+			out.Write(text[denStart:denEnd])
+			out.WriteString("</sub>")
+			return denEnd - 1
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
+		if text[0] == '1' && text[1] == '/' && text[2] == '2' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
+				out.WriteString("&frac12;")
+				return 2
+			}
+		}
+
+		if text[0] == '1' && text[1] == '/' && text[2] == '4' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
+				out.WriteString("&frac14;")
+				return 2
+			}
+		}
+
+		if text[0] == '3' && text[1] == '/' && text[2] == '4' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
+				out.WriteString("&frac34;")
+				return 2
+			}
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int {
+	nextChar := byte(0)
+	if len(text) > 1 {
+		nextChar = text[1]
+	}
+	if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) {
+		out.WriteString("&quot;")
+	}
+
+	return 0
+}
+
+func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	return r.smartDoubleQuoteVariant(out, previousChar, text, 'd')
+}
+
+func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	return r.smartDoubleQuoteVariant(out, previousChar, text, 'a')
+}
+
+func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int {
+	i := 0
+
+	for i < len(text) && text[i] != '>' {
+		i++
+	}
+
+	out.Write(text[:i+1])
+	return i
+}
+
+type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int
+
+// NewSmartypantsRenderer constructs a Smartypants renderer object.
+func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer {
+	var (
+		r SPRenderer
+
+		smartAmpAngled      = r.smartAmp(true, false)
+		smartAmpAngledNBSP  = r.smartAmp(true, true)
+		smartAmpRegular     = r.smartAmp(false, false)
+		smartAmpRegularNBSP = r.smartAmp(false, true)
+
+		addNBSP = flags&SmartypantsQuotesNBSP != 0
+	)
+
+	if flags&SmartypantsAngledQuotes == 0 {
+		r.callbacks['"'] = r.smartDoubleQuote
+		if !addNBSP {
+			r.callbacks['&'] = smartAmpRegular
+		} else {
+			r.callbacks['&'] = smartAmpRegularNBSP
+		}
+	} else {
+		r.callbacks['"'] = r.smartAngledDoubleQuote
+		if !addNBSP {
+			r.callbacks['&'] = smartAmpAngled
+		} else {
+			r.callbacks['&'] = smartAmpAngledNBSP
+		}
+	}
+	r.callbacks['\''] = r.smartSingleQuote
+	r.callbacks['('] = r.smartParens
+	if flags&SmartypantsDashes != 0 {
+		if flags&SmartypantsLatexDashes == 0 {
+			r.callbacks['-'] = r.smartDash
+		} else {
+			r.callbacks['-'] = r.smartDashLatex
+		}
+	}
+	r.callbacks['.'] = r.smartPeriod
+	if flags&SmartypantsFractions == 0 {
+		r.callbacks['1'] = r.smartNumber
+		r.callbacks['3'] = r.smartNumber
+	} else {
+		for ch := '1'; ch <= '9'; ch++ {
+			r.callbacks[ch] = r.smartNumberGeneric
+		}
+	}
+	r.callbacks['<'] = r.smartLeftAngle
+	r.callbacks['`'] = r.smartBacktick
+	return &r
+}
+
+// Process is the entry point of the Smartypants renderer.
+func (r *SPRenderer) Process(w io.Writer, text []byte) {
+	mark := 0
+	for i := 0; i < len(text); i++ {
+		if action := r.callbacks[text[i]]; action != nil {
+			if i > mark {
+				w.Write(text[mark:i])
+			}
+			previousChar := byte(0)
+			if i > 0 {
+				previousChar = text[i-1]
+			}
+			var tmp bytes.Buffer
+			i += action(&tmp, previousChar, text[i:])
+			w.Write(tmp.Bytes())
+			mark = i + 1
+		}
+	}
+	if mark < len(text) {
+		w.Write(text[mark:])
+	}
+}
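// A minimal sketch of driving the SPRenderer above directly (an editorial
// illustration, not part of the patch). Normally SmartyPants runs inside the
// HTML renderer; the flags shown are exported HTMLFlags values.
package main

import (
	"bytes"
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	sp := blackfriday.NewSmartypantsRenderer(blackfriday.SmartypantsFractions | blackfriday.SmartypantsDashes)
	var out bytes.Buffer
	// "--" becomes &mdash; and 3/4 becomes &frac34; via the callbacks above.
	sp.Process(&out, []byte(`"Smart" quotes -- and 3/4 of a fraction`))
	fmt.Println(out.String())
}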
diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go
new file mode 100644
index 000000000000..b8c15ce88543
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/man_docs.go
@@ -0,0 +1,246 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package doc
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/cpuguy83/go-md2man/v2/md2man"
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+)
+
+// GenManTree will generate a man page for this command and all descendants
+// in the directory given. The header may be nil. This function may not work
+// correctly if your command names have `-` in them. If you have `cmd` with two
+// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third`,
+// it is undefined which help output will be in the file `cmd-sub-third.1`.
+func GenManTree(cmd *cobra.Command, header *GenManHeader, dir string) error {
+	return GenManTreeFromOpts(cmd, GenManTreeOptions{
+		Header:           header,
+		Path:             dir,
+		CommandSeparator: "-",
+	})
+}
+
+// GenManTreeFromOpts generates a man page for the command and all descendants.
+// The pages are written to the opts.Path directory.
+func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error {
+	header := opts.Header
+	if header == nil {
+		header = &GenManHeader{}
+	}
+	for _, c := range cmd.Commands() {
+		if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
+			continue
+		}
+		if err := GenManTreeFromOpts(c, opts); err != nil {
+			return err
+		}
+	}
+	section := "1"
+	if header.Section != "" {
+		section = header.Section
+	}
+
+	separator := "_"
+	if opts.CommandSeparator != "" {
+		separator = opts.CommandSeparator
+	}
+	basename := strings.ReplaceAll(cmd.CommandPath(), " ", separator)
+	filename := filepath.Join(opts.Path, basename+"."+section)
+	f, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	headerCopy := *header
+	return GenMan(cmd, &headerCopy, f)
+}
+
+// GenManTreeOptions is the options for generating the man pages.
+// Used only in GenManTreeFromOpts.
+type GenManTreeOptions struct {
+	Header           *GenManHeader
+	Path             string
+	CommandSeparator string
+}
+
+// GenManHeader is a lot like the .TH header at the start of man pages. These
+// include the title, section, date, source, and manual. We will use the
+// current time if Date is unset and will use "Auto generated by spf13/cobra"
+// if the Source is unset.
+type GenManHeader struct {
+	Title   string
+	Section string
+	Date    *time.Time
+	date    string
+	Source  string
+	Manual  string
+}
+
+// GenMan will generate a man page for the given command and write it to
+// w. The header argument may be nil, however obviously w may not.
+func GenMan(cmd *cobra.Command, header *GenManHeader, w io.Writer) error {
+	if header == nil {
+		header = &GenManHeader{}
+	}
+	if err := fillHeader(header, cmd.CommandPath(), cmd.DisableAutoGenTag); err != nil {
+		return err
+	}
+
+	b := genMan(cmd, header)
+	_, err := w.Write(md2man.Render(b))
+	return err
+}
+
+func fillHeader(header *GenManHeader, name string, disableAutoGen bool) error {
+	if header.Title == "" {
+		header.Title = strings.ToUpper(strings.ReplaceAll(name, " ", "\\-"))
+	}
+	if header.Section == "" {
+		header.Section = "1"
+	}
+	if header.Date == nil {
+		now := time.Now()
+		if epoch := os.Getenv("SOURCE_DATE_EPOCH"); epoch != "" {
+			unixEpoch, err := strconv.ParseInt(epoch, 10, 64)
+			if err != nil {
+				return fmt.Errorf("invalid SOURCE_DATE_EPOCH: %v", err)
+			}
+			now = time.Unix(unixEpoch, 0)
+		}
+		header.Date = &now
+	}
+	header.date = (*header.Date).Format("Jan 2006")
+	if header.Source == "" && !disableAutoGen {
+		header.Source = "Auto generated by spf13/cobra"
+	}
+	return nil
+}
+
+func manPreamble(buf io.StringWriter, header *GenManHeader, cmd *cobra.Command, dashedName string) {
+	description := cmd.Long
+	if len(description) == 0 {
+		description = cmd.Short
+	}
+
+	cobra.WriteStringAndCheck(buf, fmt.Sprintf(`%% "%s" "%s" "%s" "%s" "%s"
+# NAME
+`, header.Title, header.Section, header.date, header.Source, header.Manual))
+	cobra.WriteStringAndCheck(buf, fmt.Sprintf("%s \\- %s\n\n", dashedName, cmd.Short))
+	cobra.WriteStringAndCheck(buf, "# SYNOPSIS\n")
+	cobra.WriteStringAndCheck(buf, fmt.Sprintf("**%s**\n\n", cmd.UseLine()))
+	cobra.WriteStringAndCheck(buf, "# DESCRIPTION\n")
+	cobra.WriteStringAndCheck(buf, description+"\n\n")
+}
+
+func manPrintFlags(buf io.StringWriter, flags *pflag.FlagSet) {
+	flags.VisitAll(func(flag *pflag.Flag) {
+		if len(flag.Deprecated) > 0 || flag.Hidden {
+			return
+		}
+		format := ""
+		if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
+			format = fmt.Sprintf("**-%s**, **--%s**", flag.Shorthand, flag.Name)
+		} else {
+			format = fmt.Sprintf("**--%s**", flag.Name)
+		}
+		if len(flag.NoOptDefVal) > 0 {
+			format += "["
+		}
+		if flag.Value.Type() == "string" {
+			// put quotes on the value
+			format += "=%q"
+		} else {
+			format += "=%s"
+		}
+		if len(flag.NoOptDefVal) > 0 {
+			format += "]"
+		}
+		format += "\n\t%s\n\n"
+		cobra.WriteStringAndCheck(buf, fmt.Sprintf(format, flag.DefValue, flag.Usage))
+	})
+}
+
+func manPrintOptions(buf io.StringWriter, command *cobra.Command) {
+	flags := command.NonInheritedFlags()
+	if flags.HasAvailableFlags() {
+		cobra.WriteStringAndCheck(buf, "# OPTIONS\n")
+		manPrintFlags(buf, flags)
+		cobra.WriteStringAndCheck(buf, "\n")
+	}
+	flags = command.InheritedFlags()
+	if flags.HasAvailableFlags() {
+		cobra.WriteStringAndCheck(buf, "# OPTIONS INHERITED FROM PARENT COMMANDS\n")
+		manPrintFlags(buf, flags)
+		cobra.WriteStringAndCheck(buf, "\n")
+	}
+}
+
+func genMan(cmd *cobra.Command, header *GenManHeader) []byte {
+	cmd.InitDefaultHelpCmd()
+	cmd.InitDefaultHelpFlag()
+
+	// something like `rootcmd-subcmd1-subcmd2`
+	dashCommandName := strings.ReplaceAll(cmd.CommandPath(), " ", "-")
+
+	buf := new(bytes.Buffer)
+
+	manPreamble(buf, header, cmd, dashCommandName)
+	manPrintOptions(buf, cmd)
+	if len(cmd.Example) > 0 {
+		buf.WriteString("# EXAMPLE\n")
+		buf.WriteString(fmt.Sprintf("```\n%s\n```\n", cmd.Example))
+	}
+	if hasSeeAlso(cmd) {
+		buf.WriteString("# SEE ALSO\n")
+		seealsos := make([]string, 0)
+		if cmd.HasParent() {
+			parentPath := cmd.Parent().CommandPath()
+			dashParentPath := strings.ReplaceAll(parentPath, " ", "-")
+			seealso := fmt.Sprintf("**%s(%s)**", dashParentPath, header.Section)
+			seealsos = append(seealsos, seealso)
+			cmd.VisitParents(func(c *cobra.Command) {
+				if c.DisableAutoGenTag {
+					cmd.DisableAutoGenTag = c.DisableAutoGenTag
+				}
+			})
+		}
+		children := cmd.Commands()
+		sort.Sort(byName(children))
+		for _, c := range children {
+			if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
+				continue
+			}
+			seealso := fmt.Sprintf("**%s-%s(%s)**", dashCommandName, c.Name(), header.Section)
+			seealsos = append(seealsos, seealso)
+		}
+		buf.WriteString(strings.Join(seealsos, ", ") + "\n")
+	}
+	if !cmd.DisableAutoGenTag {
+		buf.WriteString(fmt.Sprintf("# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006")))
+	}
+	return buf.Bytes()
+}
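// A minimal sketch of the man-page generator above (an editorial illustration,
// not part of the patch; the command definition is hypothetical). Note that
// fillHeader honours SOURCE_DATE_EPOCH, which is what keeps the dates in
// generated pages reproducible for distribution builds.
package main

import (
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	root := &cobra.Command{Use: "docker", Short: "A self-sufficient runtime for containers"}
	header := &doc.GenManHeader{Title: "DOCKER", Section: "1"}
	// Writes docker.1 plus one page per subcommand into ./man1.
	if err := doc.GenManTree(root, header, "./man1"); err != nil {
		log.Fatal(err)
	}
}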
diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go
new file mode 100644
index 000000000000..f98fe2a3b8f3
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/md_docs.go
@@ -0,0 +1,158 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package doc
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/spf13/cobra"
+)
+
+const markdownExtension = ".md"
+
+func printOptions(buf *bytes.Buffer, cmd *cobra.Command, name string) error {
+	flags := cmd.NonInheritedFlags()
+	flags.SetOutput(buf)
+	if flags.HasAvailableFlags() {
+		buf.WriteString("### Options\n\n```\n")
+		flags.PrintDefaults()
+		buf.WriteString("```\n\n")
+	}
+
+	parentFlags := cmd.InheritedFlags()
+	parentFlags.SetOutput(buf)
+	if parentFlags.HasAvailableFlags() {
+		buf.WriteString("### Options inherited from parent commands\n\n```\n")
+		parentFlags.PrintDefaults()
+		buf.WriteString("```\n\n")
+	}
+	return nil
+}
+
+// GenMarkdown creates markdown output.
+func GenMarkdown(cmd *cobra.Command, w io.Writer) error {
+	return GenMarkdownCustom(cmd, w, func(s string) string { return s })
+}
+
+// GenMarkdownCustom creates custom markdown output.
+func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error {
+	cmd.InitDefaultHelpCmd()
+	cmd.InitDefaultHelpFlag()
+
+	buf := new(bytes.Buffer)
+	name := cmd.CommandPath()
+
+	buf.WriteString("## " + name + "\n\n")
+	buf.WriteString(cmd.Short + "\n\n")
+	if len(cmd.Long) > 0 {
+		buf.WriteString("### Synopsis\n\n")
+		buf.WriteString(cmd.Long + "\n\n")
+	}
+
+	if cmd.Runnable() {
+		buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.UseLine()))
+	}
+
+	if len(cmd.Example) > 0 {
+		buf.WriteString("### Examples\n\n")
+		buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example))
+	}
+
+	if err := printOptions(buf, cmd, name); err != nil {
+		return err
+	}
+	if hasSeeAlso(cmd) {
+		buf.WriteString("### SEE ALSO\n\n")
+		if cmd.HasParent() {
+			parent := cmd.Parent()
+			pname := parent.CommandPath()
+			link := pname + markdownExtension
+			link = strings.ReplaceAll(link, " ", "_")
+			buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short))
+			cmd.VisitParents(func(c *cobra.Command) {
+				if c.DisableAutoGenTag {
+					cmd.DisableAutoGenTag = c.DisableAutoGenTag
+				}
+			})
+		}
+
+		children := cmd.Commands()
+		sort.Sort(byName(children))
+
+		for _, child := range children {
+			if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
+				continue
+			}
+			cname := name + " " + child.Name()
+			link := cname + markdownExtension
+			link = strings.ReplaceAll(link, " ", "_")
+			buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short))
+		}
+		buf.WriteString("\n")
+	}
+	if !cmd.DisableAutoGenTag {
+		buf.WriteString("###### Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "\n")
+	}
+	_, err := buf.WriteTo(w)
+	return err
+}
+
+// GenMarkdownTree will generate a markdown page for this command and all
+// descendants in the directory given. The header may be nil.
+// This function may not work correctly if your command names have `-` in them.
+// If you have `cmd` with two subcmds, `sub` and `sub-third`,
+// and `sub` has a subcommand called `third`, it is undefined which
+// help output will be in the file `cmd-sub-third.1`.
+func GenMarkdownTree(cmd *cobra.Command, dir string) error {
+	identity := func(s string) string { return s }
+	emptyStr := func(s string) string { return "" }
+	return GenMarkdownTreeCustom(cmd, dir, emptyStr, identity)
+}
+
+// GenMarkdownTreeCustom is the same as GenMarkdownTree, but
+// with custom filePrepender and linkHandler.
+func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error {
+	for _, c := range cmd.Commands() {
+		if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
+			continue
+		}
+		if err := GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
+			return err
+		}
+	}
+
+	basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + markdownExtension
+	filename := filepath.Join(dir, basename)
+	f, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	if _, err := io.WriteString(f, filePrepender(filename)); err != nil {
+		return err
+	}
+	if err := GenMarkdownCustom(cmd, f, linkHandler); err != nil {
+		return err
+	}
+	return nil
+}
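// A minimal sketch of GenMarkdownTreeCustom above (an editorial illustration,
// not part of the patch). The front matter and link prefix are hypothetical
// examples of the filePrepender/linkHandler hooks the function exposes.
package main

import (
	"fmt"
	"log"
	"path/filepath"
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	root := &cobra.Command{Use: "docker", Short: "A self-sufficient runtime for containers"}
	// filePrepender injects front matter; linkHandler rewrites cross-links.
	filePrepender := func(filename string) string {
		name := strings.TrimSuffix(filepath.Base(filename), ".md")
		return fmt.Sprintf("---\ntitle: %s\n---\n\n", name)
	}
	linkHandler := func(link string) string { return "/reference/" + link }
	if err := doc.GenMarkdownTreeCustom(root, "./docs", filePrepender, linkHandler); err != nil {
		log.Fatal(err)
	}
}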
diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.go b/vendor/github.com/spf13/cobra/doc/rest_docs.go
new file mode 100644
index 000000000000..2cca6fd778de
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/rest_docs.go
@@ -0,0 +1,186 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package doc
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/spf13/cobra"
+)
+
+func printOptionsReST(buf *bytes.Buffer, cmd *cobra.Command, name string) error {
+	flags := cmd.NonInheritedFlags()
+	flags.SetOutput(buf)
+	if flags.HasAvailableFlags() {
+		buf.WriteString("Options\n")
+		buf.WriteString("~~~~~~~\n\n::\n\n")
+		flags.PrintDefaults()
+		buf.WriteString("\n")
+	}
+
+	parentFlags := cmd.InheritedFlags()
+	parentFlags.SetOutput(buf)
+	if parentFlags.HasAvailableFlags() {
+		buf.WriteString("Options inherited from parent commands\n")
+		buf.WriteString("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n")
+		parentFlags.PrintDefaults()
+		buf.WriteString("\n")
+	}
+	return nil
+}
+
+// defaultLinkHandler for default ReST hyperlink markup
+func defaultLinkHandler(name, ref string) string {
+	return fmt.Sprintf("`%s <%s.rst>`_", name, ref)
+}
+
+// GenReST creates reStructured Text output.
+func GenReST(cmd *cobra.Command, w io.Writer) error {
+	return GenReSTCustom(cmd, w, defaultLinkHandler)
+}
+
+// GenReSTCustom creates custom reStructured Text output.
+func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, string) string) error {
+	cmd.InitDefaultHelpCmd()
+	cmd.InitDefaultHelpFlag()
+
+	buf := new(bytes.Buffer)
+	name := cmd.CommandPath()
+
+	short := cmd.Short
+	long := cmd.Long
+	if len(long) == 0 {
+		long = short
+	}
+	ref := strings.ReplaceAll(name, " ", "_")
+
+	buf.WriteString(".. _" + ref + ":\n\n")
+	buf.WriteString(name + "\n")
+	buf.WriteString(strings.Repeat("-", len(name)) + "\n\n")
+	buf.WriteString(short + "\n\n")
+	buf.WriteString("Synopsis\n")
+	buf.WriteString("~~~~~~~~\n\n")
+	buf.WriteString("\n" + long + "\n\n")
+
+	if cmd.Runnable() {
+		buf.WriteString(fmt.Sprintf("::\n\n  %s\n\n", cmd.UseLine()))
+	}
+
+	if len(cmd.Example) > 0 {
+		buf.WriteString("Examples\n")
+		buf.WriteString("~~~~~~~~\n\n")
+		buf.WriteString(fmt.Sprintf("::\n\n%s\n\n", indentString(cmd.Example, "  ")))
+	}
+
+	if err := printOptionsReST(buf, cmd, name); err != nil {
+		return err
+	}
+	if hasSeeAlso(cmd) {
+		buf.WriteString("SEE ALSO\n")
+		buf.WriteString("~~~~~~~~\n\n")
+		if cmd.HasParent() {
+			parent := cmd.Parent()
+			pname := parent.CommandPath()
+			ref = strings.ReplaceAll(pname, " ", "_")
+			buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(pname, ref), parent.Short))
+			cmd.VisitParents(func(c *cobra.Command) {
+				if c.DisableAutoGenTag {
+					cmd.DisableAutoGenTag = c.DisableAutoGenTag
+				}
+			})
+		}
+
+		children := cmd.Commands()
+		sort.Sort(byName(children))
+
+		for _, child := range children {
+			if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
+				continue
+			}
+			cname := name + " " + child.Name()
+			ref = strings.ReplaceAll(cname, " ", "_")
+			buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(cname, ref), child.Short))
+		}
+		buf.WriteString("\n")
+	}
+	if !cmd.DisableAutoGenTag {
+		buf.WriteString("*Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "*\n")
+	}
+	_, err := buf.WriteTo(w)
+	return err
+}
+
+// GenReSTTree will generate a ReST page for this command and all
+// descendants in the directory given.
+// This function may not work correctly if your command names have `-` in them.
+// If you have `cmd` with two subcmds, `sub` and `sub-third`,
+// and `sub` has a subcommand called `third`, it is undefined which
+// help output will be in the file `cmd-sub-third.1`.
+func GenReSTTree(cmd *cobra.Command, dir string) error {
+	emptyStr := func(s string) string { return "" }
+	return GenReSTTreeCustom(cmd, dir, emptyStr, defaultLinkHandler)
+}
+
+// GenReSTTreeCustom is the same as GenReSTTree, but
+// with custom filePrepender and linkHandler.
+func GenReSTTreeCustom(cmd *cobra.Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error {
+	for _, c := range cmd.Commands() {
+		if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
+			continue
+		}
+		if err := GenReSTTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
+			return err
+		}
+	}
+
+	basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".rst"
+	filename := filepath.Join(dir, basename)
+	f, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	if _, err := io.WriteString(f, filePrepender(filename)); err != nil {
+		return err
+	}
+	if err := GenReSTCustom(cmd, f, linkHandler); err != nil {
+		return err
+	}
+	return nil
+}
+
+// indentString adapted from: https://github.com/kr/text/blob/main/indent.go
+func indentString(s, p string) string {
+	var res []byte
+	b := []byte(s)
+	prefix := []byte(p)
+	bol := true
+	for _, c := range b {
+		if bol && c != '\n' {
+			res = append(res, prefix...)
+		}
+		res = append(res, c)
+		bol = c == '\n'
+	}
+	return string(res)
+}
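// A minimal sketch of the ReST generator above (an editorial illustration, not
// part of the patch; the command is hypothetical and output goes to stdout
// rather than to a directory tree).
package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	cmd := &cobra.Command{Use: "ps", Short: "List containers"}
	if err := doc.GenReST(cmd, os.Stdout); err != nil {
		log.Fatal(err)
	}
}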
diff --git a/vendor/github.com/spf13/cobra/doc/util.go b/vendor/github.com/spf13/cobra/doc/util.go
new file mode 100644
index 000000000000..0aaa07a166d8
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/doc/util.go
@@ -0,0 +1,52 @@
+// Copyright 2013-2023 The Cobra Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package doc
+
+import (
+	"strings"
+
+	"github.com/spf13/cobra"
+)
+
+// Test to see if we have a reason to print See Also information in docs
+// Basically this is a test for a parent command or a subcommand which is
+// both not deprecated and not the autogenerated help command.
+func hasSeeAlso(cmd *cobra.Command) bool {
+	if cmd.HasParent() {
+		return true
+	}
+	for _, c := range cmd.Commands() {
+		if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
+			continue
+		}
+		return true
+	}
+	return false
+}
+
+// Temporary workaround for yaml lib generating incorrect yaml with long strings
+// that do not contain \n.
+func forceMultiLine(s string) string {
+	if len(s) > 60 && !strings.Contains(s, "\n") {
+		s = s + "\n"
+	}
+	return s
+}
+
+type byName []*cobra.Command
+
+func (s byName) Len() int           { return len(s) }
+func (s byName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go
|
||
new file mode 100644
|
||
index 000000000000..2b26d6ec0f3e
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.go
|
||
@@ -0,0 +1,175 @@
|
||
+// Copyright 2013-2023 The Cobra Authors
|
||
+//
|
||
+// Licensed under the Apache License, Version 2.0 (the "License");
|
||
+// you may not use this file except in compliance with the License.
|
||
+// You may obtain a copy of the License at
|
||
+//
|
||
+// http://www.apache.org/licenses/LICENSE-2.0
|
||
+//
|
||
+// Unless required by applicable law or agreed to in writing, software
|
||
+// distributed under the License is distributed on an "AS IS" BASIS,
|
||
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+// See the License for the specific language governing permissions and
|
||
+// limitations under the License.
|
||
+
|
||
+package doc
|
||
+
|
||
+import (
|
||
+ "fmt"
|
||
+ "io"
|
||
+ "os"
|
||
+ "path/filepath"
|
||
+ "sort"
|
||
+ "strings"
|
||
+
|
||
+ "github.com/spf13/cobra"
|
||
+ "github.com/spf13/pflag"
|
||
+ "gopkg.in/yaml.v3"
|
||
+)
|
||
+
|
||
+type cmdOption struct {
|
||
+ Name string
|
||
+ Shorthand string `yaml:",omitempty"`
|
||
+ DefaultValue string `yaml:"default_value,omitempty"`
|
||
+ Usage string `yaml:",omitempty"`
|
||
+}
|
||
+
|
||
+type cmdDoc struct {
|
||
+ Name string
|
||
+ Synopsis string `yaml:",omitempty"`
|
||
+ Description string `yaml:",omitempty"`
|
||
+ Usage string `yaml:",omitempty"`
|
||
+ Options []cmdOption `yaml:",omitempty"`
|
||
+ InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"`
|
||
+ Example string `yaml:",omitempty"`
|
||
+ SeeAlso []string `yaml:"see_also,omitempty"`
|
||
+}
|
||
+
|
||
+// GenYamlTree creates yaml structured ref files for this command and all descendants
|
||
+// in the directory given. This function may not work
|
||
+// correctly if your command names have `-` in them. If you have `cmd` with two
|
||
+// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third`
|
||
+// it is undefined which help output will be in the file `cmd-sub-third.1`.
|
||
+func GenYamlTree(cmd *cobra.Command, dir string) error {
|
||
+ identity := func(s string) string { return s }
|
||
+ emptyStr := func(s string) string { return "" }
|
||
+ return GenYamlTreeCustom(cmd, dir, emptyStr, identity)
|
||
+}
|
||
+
|
||
+// GenYamlTreeCustom creates yaml structured ref files.
|
||
+func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error {
|
||
+ for _, c := range cmd.Commands() {
|
||
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
|
||
+ continue
|
||
+ }
|
||
+ if err := GenYamlTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ }
|
||
+
|
||
+ basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".yaml"
|
||
+ filename := filepath.Join(dir, basename)
|
||
+ f, err := os.Create(filename)
|
||
+ if err != nil {
|
||
+ return err
|
||
+ }
|
||
+ defer f.Close()
|
||
+
|
||
+ if _, err := io.WriteString(f, filePrepender(filename)); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ if err := GenYamlCustom(cmd, f, linkHandler); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ return nil
|
||
+}
|
||
+
|
||
+// GenYaml creates yaml output.
|
||
+func GenYaml(cmd *cobra.Command, w io.Writer) error {
|
||
+ return GenYamlCustom(cmd, w, func(s string) string { return s })
|
||
+}
|
||
+
|
||
+// GenYamlCustom creates custom yaml output.
|
||
+func GenYamlCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error {
|
||
+ cmd.InitDefaultHelpCmd()
|
||
+ cmd.InitDefaultHelpFlag()
|
||
+
|
||
+ yamlDoc := cmdDoc{}
|
||
+ yamlDoc.Name = cmd.CommandPath()
|
||
+
|
||
+ yamlDoc.Synopsis = forceMultiLine(cmd.Short)
|
||
+ yamlDoc.Description = forceMultiLine(cmd.Long)
|
||
+
|
||
+ if cmd.Runnable() {
|
||
+ yamlDoc.Usage = cmd.UseLine()
|
||
+ }
|
||
+
|
||
+ if len(cmd.Example) > 0 {
|
||
+ yamlDoc.Example = cmd.Example
|
||
+ }
|
||
+
|
||
+ flags := cmd.NonInheritedFlags()
|
||
+ if flags.HasFlags() {
|
||
+ yamlDoc.Options = genFlagResult(flags)
|
||
+ }
|
||
+ flags = cmd.InheritedFlags()
|
||
+ if flags.HasFlags() {
|
||
+ yamlDoc.InheritedOptions = genFlagResult(flags)
|
||
+ }
|
||
+
|
||
+ if hasSeeAlso(cmd) {
|
||
+ result := []string{}
|
||
+ if cmd.HasParent() {
|
||
+ parent := cmd.Parent()
|
||
+ result = append(result, parent.CommandPath()+" - "+parent.Short)
|
||
+ }
|
||
+ children := cmd.Commands()
|
||
+ sort.Sort(byName(children))
|
||
+ for _, child := range children {
|
||
+ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
|
||
+ continue
|
||
+ }
|
||
+ result = append(result, child.CommandPath()+" - "+child.Short)
|
||
+ }
|
||
+ yamlDoc.SeeAlso = result
|
||
+ }
|
||
+
|
||
+ final, err := yaml.Marshal(&yamlDoc)
|
||
+ if err != nil {
|
||
+ fmt.Println(err)
|
||
+ os.Exit(1)
|
||
+ }
|
||
+
|
||
+ if _, err := w.Write(final); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ return nil
|
||
+}
|
||
+
|
||
+func genFlagResult(flags *pflag.FlagSet) []cmdOption {
|
||
+ var result []cmdOption
|
||
+
|
||
+ flags.VisitAll(func(flag *pflag.Flag) {
|
||
+        // TODO: a shorthand can be marked deprecated with an empty message, in
+        // which case flag.ShorthandDeprecated is empty even though the shorthand
+        // is deprecated, so len(flag.ShorthandDeprecated) > 0 misses that case;
+        // the other cases are handled correctly.
|
||
+ if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 {
|
||
+ opt := cmdOption{
|
||
+ flag.Name,
|
||
+ flag.Shorthand,
|
||
+ flag.DefValue,
|
||
+ forceMultiLine(flag.Usage),
|
||
+ }
|
||
+ result = append(result, opt)
|
||
+ } else {
|
||
+ opt := cmdOption{
|
||
+ Name: flag.Name,
|
||
+ DefaultValue: forceMultiLine(flag.DefValue),
|
||
+ Usage: forceMultiLine(flag.Usage),
|
||
+ }
|
||
+ result = append(result, opt)
|
||
+ }
|
||
+ })
|
||
+
|
||
+ return result
|
||
+}
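To see what the cmdDoc layout above produces, a small hypothetical driver (the `serve` command and its flag are invented for illustration) can render a single command to any io.Writer:

```Go
package main

import (
    "bytes"
    "fmt"
    "log"

    "github.com/spf13/cobra"
    "github.com/spf13/cobra/doc"
)

func main() {
    cmd := &cobra.Command{Use: "serve", Short: "Start the example server"}
    cmd.Flags().IntP("port", "p", 8080, "port to listen on")

    // GenYaml fills in cmdDoc (name, synopsis, options, and so on) for
    // this one command and marshals it as YAML into the buffer.
    var buf bytes.Buffer
    if err := doc.GenYaml(cmd, &buf); err != nil {
        log.Fatal(err)
    }
    fmt.Print(buf.String())
}
```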
|
||
diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE
|
||
new file mode 100644
|
||
index 000000000000..2683e4bb1f24
|
||
--- /dev/null
|
||
+++ b/vendor/gopkg.in/yaml.v3/LICENSE
|
||
@@ -0,0 +1,50 @@
|
||
+
|
||
+This project is covered by two different licenses: MIT and Apache.
|
||
+
|
||
+#### MIT License ####
|
||
+
|
||
+The following files were ported to Go from C files of libyaml, and thus
|
||
+are still covered by their original MIT license, with the additional
|
||
+copyright starting in 2011 when the project was ported over:
|
||
+
|
||
+ apic.go emitterc.go parserc.go readerc.go scannerc.go
|
||
+ writerc.go yamlh.go yamlprivateh.go
|
||
+
|
||
+Copyright (c) 2006-2010 Kirill Simonov
|
||
+Copyright (c) 2006-2011 Kirill Simonov
|
||
+
|
||
+Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||
+this software and associated documentation files (the "Software"), to deal in
|
||
+the Software without restriction, including without limitation the rights to
|
||
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||
+of the Software, and to permit persons to whom the Software is furnished to do
|
||
+so, subject to the following conditions:
|
||
+
|
||
+The above copyright notice and this permission notice shall be included in all
|
||
+copies or substantial portions of the Software.
|
||
+
|
||
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||
+SOFTWARE.
|
||
+
|
||
+### Apache License ###
|
||
+
|
||
+All the remaining project files are covered by the Apache license:
|
||
+
|
||
+Copyright (c) 2011-2019 Canonical Ltd
|
||
+
|
||
+Licensed under the Apache License, Version 2.0 (the "License");
|
||
+you may not use this file except in compliance with the License.
|
||
+You may obtain a copy of the License at
|
||
+
|
||
+ http://www.apache.org/licenses/LICENSE-2.0
|
||
+
|
||
+Unless required by applicable law or agreed to in writing, software
|
||
+distributed under the License is distributed on an "AS IS" BASIS,
|
||
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+See the License for the specific language governing permissions and
|
||
+limitations under the License.
|
||
diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE
|
||
new file mode 100644
|
||
index 000000000000..866d74a7ad79
|
||
--- /dev/null
|
||
+++ b/vendor/gopkg.in/yaml.v3/NOTICE
|
||
@@ -0,0 +1,13 @@
|
||
+Copyright 2011-2016 Canonical Ltd.
|
||
+
|
||
+Licensed under the Apache License, Version 2.0 (the "License");
|
||
+you may not use this file except in compliance with the License.
|
||
+You may obtain a copy of the License at
|
||
+
|
||
+ http://www.apache.org/licenses/LICENSE-2.0
|
||
+
|
||
+Unless required by applicable law or agreed to in writing, software
|
||
+distributed under the License is distributed on an "AS IS" BASIS,
|
||
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+See the License for the specific language governing permissions and
|
||
+limitations under the License.
|
||
diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md
|
||
new file mode 100644
|
||
index 000000000000..08eb1babddfa
|
||
--- /dev/null
|
||
+++ b/vendor/gopkg.in/yaml.v3/README.md
|
||
@@ -0,0 +1,150 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.2, but preserves some behavior
+from 1.1 for backwards compatibility.
+
+Specifically, as of v3 of the yaml package:
+
+ - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
+   decoded into a typed bool value. Otherwise they behave as a string. Booleans
+   in YAML 1.2 are _true/false_ only.
+ - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
+   as specified in YAML 1.2, because most parsers still use the old format.
+   Octals in the _0o777_ format are supported though, so new files work.
+ - Does not support base-60 floats. These are gone from YAML 1.2, and were
+   actually never supported by this package as it's clearly a poor choice.
+
+Beyond these compatibility notes, anchors, tags, and map merging are all
+supported. Multi-document unmarshalling is not yet implemented.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v3*.
+
+To install it, run:
+
+    go get gopkg.in/yaml.v3
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+  - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
+
+API stability
+-------------
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+    "fmt"
+    "log"
+
+    "gopkg.in/yaml.v3"
+)
+
+var data = `
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+    A string
+    B struct {
+        RenamedC int   `yaml:"c"`
+        D        []int `yaml:",flow"`
+    }
+}
+
+func main() {
+    t := T{}
+
+    err := yaml.Unmarshal([]byte(data), &t)
+    if err != nil {
+        log.Fatalf("error: %v", err)
+    }
+    fmt.Printf("--- t:\n%v\n\n", t)
+
+    d, err := yaml.Marshal(&t)
+    if err != nil {
+        log.Fatalf("error: %v", err)
+    }
+    fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+    m := make(map[interface{}]interface{})
+
+    err = yaml.Unmarshal([]byte(data), &m)
+    if err != nil {
+        log.Fatalf("error: %v", err)
+    }
+    fmt.Printf("--- m:\n%v\n\n", m)
+
+    d, err = yaml.Marshal(&m)
+    if err != nil {
+        log.Fatalf("error: %v", err)
+    }
+    fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+    c: 2
+    d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+    c: 2
+    d:
+        - 3
+        - 4
+```
+
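The first compatibility bullet above is worth seeing in action. A minimal sketch (an illustration, not part of the vendored README):

```Go
package main

import (
    "fmt"
    "log"

    "gopkg.in/yaml.v3"
)

func main() {
    doc := []byte("flag: on")

    // Decoded into a typed bool, the YAML 1.1 literal "on" becomes true.
    var typed struct {
        Flag bool `yaml:"flag"`
    }
    if err := yaml.Unmarshal(doc, &typed); err != nil {
        log.Fatal(err)
    }
    fmt.Println(typed.Flag) // true

    // Decoded into an untyped value, it stays a string, as in YAML 1.2.
    var loose map[string]interface{}
    if err := yaml.Unmarshal(doc, &loose); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%T: %v\n", loose["flag"], loose["flag"]) // string: on
}
```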
diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go
|
||
new file mode 100644
|
||
index 000000000000..ae7d049f182a
|
||
--- /dev/null
|
||
+++ b/vendor/gopkg.in/yaml.v3/apic.go
|
||
@@ -0,0 +1,747 @@
|
||
+//
|
||
+// Copyright (c) 2011-2019 Canonical Ltd
|
||
+// Copyright (c) 2006-2010 Kirill Simonov
|
||
+//
|
||
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||
+// this software and associated documentation files (the "Software"), to deal in
|
||
+// the Software without restriction, including without limitation the rights to
|
||
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||
+// of the Software, and to permit persons to whom the Software is furnished to do
|
||
+// so, subject to the following conditions:
|
||
+//
|
||
+// The above copyright notice and this permission notice shall be included in all
|
||
+// copies or substantial portions of the Software.
|
||
+//
|
||
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||
+// SOFTWARE.
|
||
+
|
||
+package yaml
|
||
+
|
||
+import (
|
||
+ "io"
|
||
+)
|
||
+
|
||
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
|
||
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
|
||
+
|
||
+ // Check if we can move the queue at the beginning of the buffer.
|
||
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
|
||
+ if parser.tokens_head != len(parser.tokens) {
|
||
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
|
||
+ }
|
||
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
|
||
+ parser.tokens_head = 0
|
||
+ }
|
||
+ parser.tokens = append(parser.tokens, *token)
|
||
+ if pos < 0 {
|
||
+ return
|
||
+ }
|
||
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
|
||
+ parser.tokens[parser.tokens_head+pos] = *token
|
||
+}
|
||
+
|
||
+// Create a new parser object.
|
||
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
|
||
+ *parser = yaml_parser_t{
|
||
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
|
||
+ buffer: make([]byte, 0, input_buffer_size),
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Destroy a parser object.
|
||
+func yaml_parser_delete(parser *yaml_parser_t) {
|
||
+ *parser = yaml_parser_t{}
|
||
+}
|
||
+
|
||
+// String read handler.
|
||
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
|
||
+ if parser.input_pos == len(parser.input) {
|
||
+ return 0, io.EOF
|
||
+ }
|
||
+ n = copy(buffer, parser.input[parser.input_pos:])
|
||
+ parser.input_pos += n
|
||
+ return n, nil
|
||
+}
|
||
+
|
||
+// Reader read handler.
|
||
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
|
||
+ return parser.input_reader.Read(buffer)
|
||
+}
|
||
+
|
||
+// Set a string input.
|
||
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
|
||
+ if parser.read_handler != nil {
|
||
+ panic("must set the input source only once")
|
||
+ }
|
||
+ parser.read_handler = yaml_string_read_handler
|
||
+ parser.input = input
|
||
+ parser.input_pos = 0
|
||
+}
|
||
+
|
||
+// Set a file input.
|
||
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
|
||
+ if parser.read_handler != nil {
|
||
+ panic("must set the input source only once")
|
||
+ }
|
||
+ parser.read_handler = yaml_reader_read_handler
|
||
+ parser.input_reader = r
|
||
+}
|
||
+
|
||
+// Set the source encoding.
|
||
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
|
||
+ if parser.encoding != yaml_ANY_ENCODING {
|
||
+ panic("must set the encoding only once")
|
||
+ }
|
||
+ parser.encoding = encoding
|
||
+}
|
||
+
|
||
+// Create a new emitter object.
|
||
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
|
||
+ *emitter = yaml_emitter_t{
|
||
+ buffer: make([]byte, output_buffer_size),
|
||
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
|
||
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
|
||
+ events: make([]yaml_event_t, 0, initial_queue_size),
|
||
+ best_width: -1,
|
||
+ }
|
||
+}
|
||
+
|
||
+// Destroy an emitter object.
|
||
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
|
||
+ *emitter = yaml_emitter_t{}
|
||
+}
|
||
+
|
||
+// String write handler.
|
||
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
||
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
|
||
+ return nil
|
||
+}
|
||
+
|
||
+// yaml_writer_write_handler uses emitter.output_writer to write the
|
||
+// emitted text.
|
||
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
||
+ _, err := emitter.output_writer.Write(buffer)
|
||
+ return err
|
||
+}
|
||
+
|
||
+// Set a string output.
|
||
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
|
||
+ if emitter.write_handler != nil {
|
||
+ panic("must set the output target only once")
|
||
+ }
|
||
+ emitter.write_handler = yaml_string_write_handler
|
||
+ emitter.output_buffer = output_buffer
|
||
+}
|
||
+
|
||
+// Set a file output.
|
||
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
|
||
+ if emitter.write_handler != nil {
|
||
+ panic("must set the output target only once")
|
||
+ }
|
||
+ emitter.write_handler = yaml_writer_write_handler
|
||
+ emitter.output_writer = w
|
||
+}
|
||
+
|
||
+// Set the output encoding.
|
||
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
|
||
+ if emitter.encoding != yaml_ANY_ENCODING {
|
||
+ panic("must set the output encoding only once")
|
||
+ }
|
||
+ emitter.encoding = encoding
|
||
+}
|
||
+
|
||
+// Set the canonical output style.
|
||
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
|
||
+ emitter.canonical = canonical
|
||
+}
|
||
+
|
||
+// Set the indentation increment.
|
||
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
|
||
+ if indent < 2 || indent > 9 {
|
||
+ indent = 2
|
||
+ }
|
||
+ emitter.best_indent = indent
|
||
+}
|
||
+
|
||
+// Set the preferred line width.
|
||
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
|
||
+ if width < 0 {
|
||
+ width = -1
|
||
+ }
|
||
+ emitter.best_width = width
|
||
+}
|
||
+
|
||
+// Set if unescaped non-ASCII characters are allowed.
|
||
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
|
||
+ emitter.unicode = unicode
|
||
+}
|
||
+
|
||
+// Set the preferred line break character.
|
||
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
|
||
+ emitter.line_break = line_break
|
||
+}
|
||
+
|
||
+///*
|
||
+// * Destroy a token object.
|
||
+// */
|
||
+//
|
||
+//YAML_DECLARE(void)
|
||
+//yaml_token_delete(yaml_token_t *token)
|
||
+//{
|
||
+// assert(token); // Non-NULL token object expected.
|
||
+//
|
||
+// switch (token.type)
|
||
+// {
|
||
+// case YAML_TAG_DIRECTIVE_TOKEN:
|
||
+// yaml_free(token.data.tag_directive.handle);
|
||
+// yaml_free(token.data.tag_directive.prefix);
|
||
+// break;
|
||
+//
|
||
+// case YAML_ALIAS_TOKEN:
|
||
+// yaml_free(token.data.alias.value);
|
||
+// break;
|
||
+//
|
||
+// case YAML_ANCHOR_TOKEN:
|
||
+// yaml_free(token.data.anchor.value);
|
||
+// break;
|
||
+//
|
||
+// case YAML_TAG_TOKEN:
|
||
+// yaml_free(token.data.tag.handle);
|
||
+// yaml_free(token.data.tag.suffix);
|
||
+// break;
|
||
+//
|
||
+// case YAML_SCALAR_TOKEN:
|
||
+// yaml_free(token.data.scalar.value);
|
||
+// break;
|
||
+//
|
||
+// default:
|
||
+// break;
|
||
+// }
|
||
+//
|
||
+// memset(token, 0, sizeof(yaml_token_t));
|
||
+//}
|
||
+//
|
||
+///*
|
||
+// * Check if a string is a valid UTF-8 sequence.
|
||
+// *
|
||
+// * Check 'reader.c' for more details on UTF-8 encoding.
|
||
+// */
|
||
+//
|
||
+//static int
|
||
+//yaml_check_utf8(yaml_char_t *start, size_t length)
|
||
+//{
|
||
+// yaml_char_t *end = start+length;
|
||
+// yaml_char_t *pointer = start;
|
||
+//
|
||
+// while (pointer < end) {
|
||
+// unsigned char octet;
|
||
+// unsigned int width;
|
||
+// unsigned int value;
|
||
+// size_t k;
|
||
+//
|
||
+// octet = pointer[0];
|
||
+// width = (octet & 0x80) == 0x00 ? 1 :
|
||
+// (octet & 0xE0) == 0xC0 ? 2 :
|
||
+// (octet & 0xF0) == 0xE0 ? 3 :
|
||
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
|
||
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
|
||
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
|
||
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
|
||
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
|
||
+// if (!width) return 0;
|
||
+// if (pointer+width > end) return 0;
|
||
+// for (k = 1; k < width; k ++) {
|
||
+// octet = pointer[k];
|
||
+// if ((octet & 0xC0) != 0x80) return 0;
|
||
+// value = (value << 6) + (octet & 0x3F);
|
||
+// }
|
||
+// if (!((width == 1) ||
|
||
+// (width == 2 && value >= 0x80) ||
|
||
+// (width == 3 && value >= 0x800) ||
|
||
+// (width == 4 && value >= 0x10000))) return 0;
|
||
+//
|
||
+// pointer += width;
|
||
+// }
|
||
+//
|
||
+// return 1;
|
||
+//}
|
||
+//
|
||
+
|
||
+// Create STREAM-START.
|
||
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_STREAM_START_EVENT,
|
||
+ encoding: encoding,
|
||
+ }
|
||
+}
|
||
+
|
||
+// Create STREAM-END.
|
||
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_STREAM_END_EVENT,
|
||
+ }
|
||
+}
|
||
+
|
||
+// Create DOCUMENT-START.
|
||
+func yaml_document_start_event_initialize(
|
||
+ event *yaml_event_t,
|
||
+ version_directive *yaml_version_directive_t,
|
||
+ tag_directives []yaml_tag_directive_t,
|
||
+ implicit bool,
|
||
+) {
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_DOCUMENT_START_EVENT,
|
||
+ version_directive: version_directive,
|
||
+ tag_directives: tag_directives,
|
||
+ implicit: implicit,
|
||
+ }
|
||
+}
|
||
+
|
||
+// Create DOCUMENT-END.
|
||
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_DOCUMENT_END_EVENT,
|
||
+ implicit: implicit,
|
||
+ }
|
||
+}
|
||
+
|
||
+// Create ALIAS.
|
||
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_ALIAS_EVENT,
|
||
+ anchor: anchor,
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Create SCALAR.
|
||
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_SCALAR_EVENT,
|
||
+ anchor: anchor,
|
||
+ tag: tag,
|
||
+ value: value,
|
||
+ implicit: plain_implicit,
|
||
+ quoted_implicit: quoted_implicit,
|
||
+ style: yaml_style_t(style),
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Create SEQUENCE-START.
|
||
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_SEQUENCE_START_EVENT,
|
||
+ anchor: anchor,
|
||
+ tag: tag,
|
||
+ implicit: implicit,
|
||
+ style: yaml_style_t(style),
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Create SEQUENCE-END.
|
||
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_SEQUENCE_END_EVENT,
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Create MAPPING-START.
|
||
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_MAPPING_START_EVENT,
|
||
+ anchor: anchor,
|
||
+ tag: tag,
|
||
+ implicit: implicit,
|
||
+ style: yaml_style_t(style),
|
||
+ }
|
||
+}
|
||
+
|
||
+// Create MAPPING-END.
|
||
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_MAPPING_END_EVENT,
|
||
+ }
|
||
+}
|
||
+
|
||
+// Destroy an event object.
|
||
+func yaml_event_delete(event *yaml_event_t) {
|
||
+ *event = yaml_event_t{}
|
||
+}
|
||
+
|
||
+///*
|
||
+// * Create a document object.
|
||
+// */
|
||
+//
|
||
+//YAML_DECLARE(int)
|
||
+//yaml_document_initialize(document *yaml_document_t,
|
||
+// version_directive *yaml_version_directive_t,
|
||
+// tag_directives_start *yaml_tag_directive_t,
|
||
+// tag_directives_end *yaml_tag_directive_t,
|
||
+// start_implicit int, end_implicit int)
|
||
+//{
|
||
+// struct {
|
||
+// error yaml_error_type_t
|
||
+// } context
|
||
+// struct {
|
||
+// start *yaml_node_t
|
||
+// end *yaml_node_t
|
||
+// top *yaml_node_t
|
||
+// } nodes = { NULL, NULL, NULL }
|
||
+// version_directive_copy *yaml_version_directive_t = NULL
|
||
+// struct {
|
||
+// start *yaml_tag_directive_t
|
||
+// end *yaml_tag_directive_t
|
||
+// top *yaml_tag_directive_t
|
||
+// } tag_directives_copy = { NULL, NULL, NULL }
|
||
+// value yaml_tag_directive_t = { NULL, NULL }
|
||
+// mark yaml_mark_t = { 0, 0, 0 }
|
||
+//
|
||
+// assert(document) // Non-NULL document object is expected.
|
||
+// assert((tag_directives_start && tag_directives_end) ||
|
||
+// (tag_directives_start == tag_directives_end))
|
||
+// // Valid tag directives are expected.
|
||
+//
|
||
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
|
||
+//
|
||
+// if (version_directive) {
|
||
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
|
||
+// if (!version_directive_copy) goto error
|
||
+// version_directive_copy.major = version_directive.major
|
||
+// version_directive_copy.minor = version_directive.minor
|
||
+// }
|
||
+//
|
||
+// if (tag_directives_start != tag_directives_end) {
|
||
+// tag_directive *yaml_tag_directive_t
|
||
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
|
||
+// goto error
|
||
+// for (tag_directive = tag_directives_start
|
||
+// tag_directive != tag_directives_end; tag_directive ++) {
|
||
+// assert(tag_directive.handle)
|
||
+// assert(tag_directive.prefix)
|
||
+// if (!yaml_check_utf8(tag_directive.handle,
|
||
+// strlen((char *)tag_directive.handle)))
|
||
+// goto error
|
||
+// if (!yaml_check_utf8(tag_directive.prefix,
|
||
+// strlen((char *)tag_directive.prefix)))
|
||
+// goto error
|
||
+// value.handle = yaml_strdup(tag_directive.handle)
|
||
+// value.prefix = yaml_strdup(tag_directive.prefix)
|
||
+// if (!value.handle || !value.prefix) goto error
|
||
+// if (!PUSH(&context, tag_directives_copy, value))
|
||
+// goto error
|
||
+// value.handle = NULL
|
||
+// value.prefix = NULL
|
||
+// }
|
||
+// }
|
||
+//
|
||
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
|
||
+// tag_directives_copy.start, tag_directives_copy.top,
|
||
+// start_implicit, end_implicit, mark, mark)
|
||
+//
|
||
+// return 1
|
||
+//
|
||
+//error:
|
||
+// STACK_DEL(&context, nodes)
|
||
+// yaml_free(version_directive_copy)
|
||
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
|
||
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
|
||
+// yaml_free(value.handle)
|
||
+// yaml_free(value.prefix)
|
||
+// }
|
||
+// STACK_DEL(&context, tag_directives_copy)
|
||
+// yaml_free(value.handle)
|
||
+// yaml_free(value.prefix)
|
||
+//
|
||
+// return 0
|
||
+//}
|
||
+//
|
||
+///*
|
||
+// * Destroy a document object.
|
||
+// */
|
||
+//
|
||
+//YAML_DECLARE(void)
|
||
+//yaml_document_delete(document *yaml_document_t)
|
||
+//{
|
||
+// struct {
|
||
+// error yaml_error_type_t
|
||
+// } context
|
||
+// tag_directive *yaml_tag_directive_t
|
||
+//
|
||
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
|
||
+//
|
||
+// assert(document) // Non-NULL document object is expected.
|
||
+//
|
||
+// while (!STACK_EMPTY(&context, document.nodes)) {
|
||
+// node yaml_node_t = POP(&context, document.nodes)
|
||
+// yaml_free(node.tag)
|
||
+// switch (node.type) {
|
||
+// case YAML_SCALAR_NODE:
|
||
+// yaml_free(node.data.scalar.value)
|
||
+// break
|
||
+// case YAML_SEQUENCE_NODE:
|
||
+// STACK_DEL(&context, node.data.sequence.items)
|
||
+// break
|
||
+// case YAML_MAPPING_NODE:
|
||
+// STACK_DEL(&context, node.data.mapping.pairs)
|
||
+// break
|
||
+// default:
|
||
+// assert(0) // Should not happen.
|
||
+// }
|
||
+// }
|
||
+// STACK_DEL(&context, document.nodes)
|
||
+//
|
||
+// yaml_free(document.version_directive)
|
||
+// for (tag_directive = document.tag_directives.start
|
||
+// tag_directive != document.tag_directives.end
|
||
+// tag_directive++) {
|
||
+// yaml_free(tag_directive.handle)
|
||
+// yaml_free(tag_directive.prefix)
|
||
+// }
|
||
+// yaml_free(document.tag_directives.start)
|
||
+//
|
||
+// memset(document, 0, sizeof(yaml_document_t))
|
||
+//}
|
||
+//
|
||
+///**
|
||
+// * Get a document node.
|
||
+// */
|
||
+//
|
||
+//YAML_DECLARE(yaml_node_t *)
|
||
+//yaml_document_get_node(document *yaml_document_t, index int)
|
||
+//{
|
||
+// assert(document) // Non-NULL document object is expected.
|
||
+//
|
||
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
|
||
+// return document.nodes.start + index - 1
|
||
+// }
|
||
+// return NULL
|
||
+//}
|
||
+//
|
||
+///**
|
||
+// * Get the root object.
|
||
+// */
|
||
+//
|
||
+//YAML_DECLARE(yaml_node_t *)
|
||
+//yaml_document_get_root_node(document *yaml_document_t)
|
||
+//{
|
||
+// assert(document) // Non-NULL document object is expected.
|
||
+//
|
||
+// if (document.nodes.top != document.nodes.start) {
|
||
+// return document.nodes.start
|
||
+// }
|
||
+// return NULL
|
||
+//}
|
||
+//
|
||
+///*
|
||
+// * Add a scalar node to a document.
|
||
+// */
|
||
+//
|
||
+//YAML_DECLARE(int)
|
||
+//yaml_document_add_scalar(document *yaml_document_t,
|
||
+// tag *yaml_char_t, value *yaml_char_t, length int,
|
||
+// style yaml_scalar_style_t)
|
||
+//{
|
||
+// struct {
|
||
+// error yaml_error_type_t
|
||
+// } context
|
||
+// mark yaml_mark_t = { 0, 0, 0 }
|
||
+// tag_copy *yaml_char_t = NULL
|
||
+// value_copy *yaml_char_t = NULL
|
||
+// node yaml_node_t
|
||
+//
|
||
+// assert(document) // Non-NULL document object is expected.
|
||
+// assert(value) // Non-NULL value is expected.
|
||
+//
|
||
+// if (!tag) {
|
||
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
|
||
+// }
|
||
+//
|
||
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||
+// tag_copy = yaml_strdup(tag)
|
||
+// if (!tag_copy) goto error
|
||
+//
|
||
+// if (length < 0) {
|
||
+// length = strlen((char *)value)
|
||
+// }
|
||
+//
|
||
+// if (!yaml_check_utf8(value, length)) goto error
|
||
+// value_copy = yaml_malloc(length+1)
|
||
+// if (!value_copy) goto error
|
||
+// memcpy(value_copy, value, length)
|
||
+// value_copy[length] = '\0'
|
||
+//
|
||
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
|
||
+// if (!PUSH(&context, document.nodes, node)) goto error
|
||
+//
|
||
+// return document.nodes.top - document.nodes.start
|
||
+//
|
||
+//error:
|
||
+// yaml_free(tag_copy)
|
||
+// yaml_free(value_copy)
|
||
+//
|
||
+// return 0
|
||
+//}
|
||
+//
|
||
+///*
|
||
+// * Add a sequence node to a document.
|
||
+// */
|
||
+//
|
||
+//YAML_DECLARE(int)
|
||
+//yaml_document_add_sequence(document *yaml_document_t,
|
||
+// tag *yaml_char_t, style yaml_sequence_style_t)
|
||
+//{
|
||
+// struct {
|
||
+// error yaml_error_type_t
|
||
+// } context
|
||
+// mark yaml_mark_t = { 0, 0, 0 }
|
||
+// tag_copy *yaml_char_t = NULL
|
||
+// struct {
|
||
+// start *yaml_node_item_t
|
||
+// end *yaml_node_item_t
|
||
+// top *yaml_node_item_t
|
||
+// } items = { NULL, NULL, NULL }
|
||
+// node yaml_node_t
|
||
+//
|
||
+// assert(document) // Non-NULL document object is expected.
|
||
+//
|
||
+// if (!tag) {
|
||
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
|
||
+// }
|
||
+//
|
||
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||
+// tag_copy = yaml_strdup(tag)
|
||
+// if (!tag_copy) goto error
|
||
+//
|
||
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
|
||
+//
|
||
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
|
||
+// style, mark, mark)
|
||
+// if (!PUSH(&context, document.nodes, node)) goto error
|
||
+//
|
||
+// return document.nodes.top - document.nodes.start
|
||
+//
|
||
+//error:
|
||
+// STACK_DEL(&context, items)
|
||
+// yaml_free(tag_copy)
|
||
+//
|
||
+// return 0
|
||
+//}
|
||
+//
|
||
+///*
|
||
+// * Add a mapping node to a document.
|
||
+// */
|
||
+//
|
||
+//YAML_DECLARE(int)
|
||
+//yaml_document_add_mapping(document *yaml_document_t,
|
||
+// tag *yaml_char_t, style yaml_mapping_style_t)
|
||
+//{
|
||
+// struct {
|
||
+// error yaml_error_type_t
|
||
+// } context
|
||
+// mark yaml_mark_t = { 0, 0, 0 }
|
||
+// tag_copy *yaml_char_t = NULL
|
||
+// struct {
|
||
+// start *yaml_node_pair_t
|
||
+// end *yaml_node_pair_t
|
||
+// top *yaml_node_pair_t
|
||
+// } pairs = { NULL, NULL, NULL }
|
||
+// node yaml_node_t
|
||
+//
|
||
+// assert(document) // Non-NULL document object is expected.
|
||
+//
|
||
+// if (!tag) {
|
||
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
|
||
+// }
|
||
+//
|
||
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||
+// tag_copy = yaml_strdup(tag)
|
||
+// if (!tag_copy) goto error
|
||
+//
|
||
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
|
||
+//
|
||
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
|
||
+// style, mark, mark)
|
||
+// if (!PUSH(&context, document.nodes, node)) goto error
|
||
+//
|
||
+// return document.nodes.top - document.nodes.start
|
||
+//
|
||
+//error:
|
||
+// STACK_DEL(&context, pairs)
|
||
+// yaml_free(tag_copy)
|
||
+//
|
||
+// return 0
|
||
+//}
|
||
+//
|
||
+///*
|
||
+// * Append an item to a sequence node.
|
||
+// */
|
||
+//
|
||
+//YAML_DECLARE(int)
|
||
+//yaml_document_append_sequence_item(document *yaml_document_t,
|
||
+// sequence int, item int)
|
||
+//{
|
||
+// struct {
|
||
+// error yaml_error_type_t
|
||
+// } context
|
||
+//
|
||
+// assert(document) // Non-NULL document is required.
|
||
+// assert(sequence > 0
|
||
+// && document.nodes.start + sequence <= document.nodes.top)
|
||
+// // Valid sequence id is required.
|
||
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
|
||
+// // A sequence node is required.
|
||
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
|
||
+// // Valid item id is required.
|
||
+//
|
||
+// if (!PUSH(&context,
|
||
+// document.nodes.start[sequence-1].data.sequence.items, item))
|
||
+// return 0
|
||
+//
|
||
+// return 1
|
||
+//}
|
||
+//
|
||
+///*
|
||
+// * Append a pair of a key and a value to a mapping node.
|
||
+// */
|
||
+//
|
||
+//YAML_DECLARE(int)
|
||
+//yaml_document_append_mapping_pair(document *yaml_document_t,
|
||
+// mapping int, key int, value int)
|
||
+//{
|
||
+// struct {
|
||
+// error yaml_error_type_t
|
||
+// } context
|
||
+//
|
||
+// pair yaml_node_pair_t
|
||
+//
|
||
+// assert(document) // Non-NULL document is required.
|
||
+// assert(mapping > 0
|
||
+// && document.nodes.start + mapping <= document.nodes.top)
|
||
+// // Valid mapping id is required.
|
||
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
|
||
+// // A mapping node is required.
|
||
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
|
||
+// // Valid key id is required.
|
||
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
|
||
+// // Valid value id is required.
|
||
+//
|
||
+// pair.key = key
|
||
+// pair.value = value
|
||
+//
|
||
+// if (!PUSH(&context,
|
||
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
|
||
+// return 0
|
||
+//
|
||
+// return 1
|
||
+//}
|
||
+//
|
||
+//
|
||
diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go
|
||
new file mode 100644
|
||
index 000000000000..0173b6982e84
|
||
--- /dev/null
|
||
+++ b/vendor/gopkg.in/yaml.v3/decode.go
|
||
@@ -0,0 +1,1000 @@
|
||
+//
|
||
+// Copyright (c) 2011-2019 Canonical Ltd
|
||
+//
|
||
+// Licensed under the Apache License, Version 2.0 (the "License");
|
||
+// you may not use this file except in compliance with the License.
|
||
+// You may obtain a copy of the License at
|
||
+//
|
||
+// http://www.apache.org/licenses/LICENSE-2.0
|
||
+//
|
||
+// Unless required by applicable law or agreed to in writing, software
|
||
+// distributed under the License is distributed on an "AS IS" BASIS,
|
||
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+// See the License for the specific language governing permissions and
|
||
+// limitations under the License.
|
||
+
|
||
+package yaml
|
||
+
|
||
+import (
|
||
+ "encoding"
|
||
+ "encoding/base64"
|
||
+ "fmt"
|
||
+ "io"
|
||
+ "math"
|
||
+ "reflect"
|
||
+ "strconv"
|
||
+ "time"
|
||
+)
|
||
+
|
||
+// ----------------------------------------------------------------------------
|
||
+// Parser, produces a node tree out of a libyaml event stream.
|
||
+
|
||
+type parser struct {
|
||
+ parser yaml_parser_t
|
||
+ event yaml_event_t
|
||
+ doc *Node
|
||
+ anchors map[string]*Node
|
||
+ doneInit bool
|
||
+ textless bool
|
||
+}
|
||
+
|
||
+func newParser(b []byte) *parser {
|
||
+ p := parser{}
|
||
+ if !yaml_parser_initialize(&p.parser) {
|
||
+ panic("failed to initialize YAML emitter")
|
||
+ }
|
||
+ if len(b) == 0 {
|
||
+ b = []byte{'\n'}
|
||
+ }
|
||
+ yaml_parser_set_input_string(&p.parser, b)
|
||
+ return &p
|
||
+}
|
||
+
|
||
+func newParserFromReader(r io.Reader) *parser {
|
||
+ p := parser{}
|
||
+ if !yaml_parser_initialize(&p.parser) {
|
||
+ panic("failed to initialize YAML emitter")
|
||
+ }
|
||
+ yaml_parser_set_input_reader(&p.parser, r)
|
||
+ return &p
|
||
+}
|
||
+
|
||
+func (p *parser) init() {
|
||
+ if p.doneInit {
|
||
+ return
|
||
+ }
|
||
+ p.anchors = make(map[string]*Node)
|
||
+ p.expect(yaml_STREAM_START_EVENT)
|
||
+ p.doneInit = true
|
||
+}
|
||
+
|
||
+func (p *parser) destroy() {
|
||
+ if p.event.typ != yaml_NO_EVENT {
|
||
+ yaml_event_delete(&p.event)
|
||
+ }
|
||
+ yaml_parser_delete(&p.parser)
|
||
+}
|
||
+
|
||
+// expect consumes an event from the event stream and
|
||
+// checks that it's of the expected type.
|
||
+func (p *parser) expect(e yaml_event_type_t) {
|
||
+ if p.event.typ == yaml_NO_EVENT {
|
||
+ if !yaml_parser_parse(&p.parser, &p.event) {
|
||
+ p.fail()
|
||
+ }
|
||
+ }
|
||
+ if p.event.typ == yaml_STREAM_END_EVENT {
|
||
+ failf("attempted to go past the end of stream; corrupted value?")
|
||
+ }
|
||
+ if p.event.typ != e {
|
||
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
|
||
+ p.fail()
|
||
+ }
|
||
+ yaml_event_delete(&p.event)
|
||
+ p.event.typ = yaml_NO_EVENT
|
||
+}
|
||
+
|
||
+// peek peeks at the next event in the event stream,
|
||
+// puts the results into p.event and returns the event type.
|
||
+func (p *parser) peek() yaml_event_type_t {
|
||
+ if p.event.typ != yaml_NO_EVENT {
|
||
+ return p.event.typ
|
||
+ }
|
||
+    // It's a curious choice for the underlying API to generally return a
+    // positive result on success, but in this case to return true in an error
+    // scenario. This was the source of bugs in the past (issue #666).
|
||
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR {
|
||
+ p.fail()
|
||
+ }
|
||
+ return p.event.typ
|
||
+}
|
||
+
|
||
+func (p *parser) fail() {
|
||
+ var where string
|
||
+ var line int
|
||
+ if p.parser.context_mark.line != 0 {
|
||
+ line = p.parser.context_mark.line
|
||
+ // Scanner errors don't iterate line before returning error
|
||
+ if p.parser.error == yaml_SCANNER_ERROR {
|
||
+ line++
|
||
+ }
|
||
+ } else if p.parser.problem_mark.line != 0 {
|
||
+ line = p.parser.problem_mark.line
|
||
+ // Scanner errors don't iterate line before returning error
|
||
+ if p.parser.error == yaml_SCANNER_ERROR {
|
||
+ line++
|
||
+ }
|
||
+ }
|
||
+ if line != 0 {
|
||
+ where = "line " + strconv.Itoa(line) + ": "
|
||
+ }
|
||
+ var msg string
|
||
+ if len(p.parser.problem) > 0 {
|
||
+ msg = p.parser.problem
|
||
+ } else {
|
||
+ msg = "unknown problem parsing YAML content"
|
||
+ }
|
||
+ failf("%s%s", where, msg)
|
||
+}
|
||
+
|
||
+func (p *parser) anchor(n *Node, anchor []byte) {
|
||
+ if anchor != nil {
|
||
+ n.Anchor = string(anchor)
|
||
+ p.anchors[n.Anchor] = n
|
||
+ }
|
||
+}
|
||
+
|
||
+func (p *parser) parse() *Node {
|
||
+ p.init()
|
||
+ switch p.peek() {
|
||
+ case yaml_SCALAR_EVENT:
|
||
+ return p.scalar()
|
||
+ case yaml_ALIAS_EVENT:
|
||
+ return p.alias()
|
||
+ case yaml_MAPPING_START_EVENT:
|
||
+ return p.mapping()
|
||
+ case yaml_SEQUENCE_START_EVENT:
|
||
+ return p.sequence()
|
||
+ case yaml_DOCUMENT_START_EVENT:
|
||
+ return p.document()
|
||
+ case yaml_STREAM_END_EVENT:
|
||
+ // Happens when attempting to decode an empty buffer.
|
||
+ return nil
|
||
+ case yaml_TAIL_COMMENT_EVENT:
|
||
+ panic("internal error: unexpected tail comment event (please report)")
|
||
+ default:
|
||
+ panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
|
||
+ }
|
||
+}
|
||
+
|
||
+func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
|
||
+ var style Style
|
||
+ if tag != "" && tag != "!" {
|
||
+ tag = shortTag(tag)
|
||
+ style = TaggedStyle
|
||
+ } else if defaultTag != "" {
|
||
+ tag = defaultTag
|
||
+ } else if kind == ScalarNode {
|
||
+ tag, _ = resolve("", value)
|
||
+ }
|
||
+ n := &Node{
|
||
+ Kind: kind,
|
||
+ Tag: tag,
|
||
+ Value: value,
|
||
+ Style: style,
|
||
+ }
|
||
+ if !p.textless {
|
||
+ n.Line = p.event.start_mark.line + 1
|
||
+ n.Column = p.event.start_mark.column + 1
|
||
+ n.HeadComment = string(p.event.head_comment)
|
||
+ n.LineComment = string(p.event.line_comment)
|
||
+ n.FootComment = string(p.event.foot_comment)
|
||
+ }
|
||
+ return n
|
||
+}
|
||
+
|
||
+func (p *parser) parseChild(parent *Node) *Node {
|
||
+ child := p.parse()
|
||
+ parent.Content = append(parent.Content, child)
|
||
+ return child
|
||
+}
|
||
+
|
||
+func (p *parser) document() *Node {
|
||
+ n := p.node(DocumentNode, "", "", "")
|
||
+ p.doc = n
|
||
+ p.expect(yaml_DOCUMENT_START_EVENT)
|
||
+ p.parseChild(n)
|
||
+ if p.peek() == yaml_DOCUMENT_END_EVENT {
|
||
+ n.FootComment = string(p.event.foot_comment)
|
||
+ }
|
||
+ p.expect(yaml_DOCUMENT_END_EVENT)
|
||
+ return n
|
||
+}
|
||
+
|
||
+func (p *parser) alias() *Node {
|
||
+ n := p.node(AliasNode, "", "", string(p.event.anchor))
|
||
+ n.Alias = p.anchors[n.Value]
|
||
+ if n.Alias == nil {
|
||
+ failf("unknown anchor '%s' referenced", n.Value)
|
||
+ }
|
||
+ p.expect(yaml_ALIAS_EVENT)
|
||
+ return n
|
||
+}
|
||
+
|
||
+func (p *parser) scalar() *Node {
|
||
+ var parsedStyle = p.event.scalar_style()
|
||
+ var nodeStyle Style
|
||
+ switch {
|
||
+ case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
|
||
+ nodeStyle = DoubleQuotedStyle
|
||
+ case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
|
||
+ nodeStyle = SingleQuotedStyle
|
||
+ case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
|
||
+ nodeStyle = LiteralStyle
|
||
+ case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
|
||
+ nodeStyle = FoldedStyle
|
||
+ }
|
||
+ var nodeValue = string(p.event.value)
|
||
+ var nodeTag = string(p.event.tag)
|
||
+ var defaultTag string
|
||
+ if nodeStyle == 0 {
|
||
+ if nodeValue == "<<" {
|
||
+ defaultTag = mergeTag
|
||
+ }
|
||
+ } else {
|
||
+ defaultTag = strTag
|
||
+ }
|
||
+ n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
|
||
+ n.Style |= nodeStyle
|
||
+ p.anchor(n, p.event.anchor)
|
||
+ p.expect(yaml_SCALAR_EVENT)
|
||
+ return n
|
||
+}
|
||
+
|
||
+func (p *parser) sequence() *Node {
|
||
+ n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
|
||
+ if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
|
||
+ n.Style |= FlowStyle
|
||
+ }
|
||
+ p.anchor(n, p.event.anchor)
|
||
+ p.expect(yaml_SEQUENCE_START_EVENT)
|
||
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
|
||
+ p.parseChild(n)
|
||
+ }
|
||
+ n.LineComment = string(p.event.line_comment)
|
||
+ n.FootComment = string(p.event.foot_comment)
|
||
+ p.expect(yaml_SEQUENCE_END_EVENT)
|
||
+ return n
|
||
+}
|
||
+
|
||
+func (p *parser) mapping() *Node {
|
||
+ n := p.node(MappingNode, mapTag, string(p.event.tag), "")
|
||
+ block := true
|
||
+ if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
|
||
+ block = false
|
||
+ n.Style |= FlowStyle
|
||
+ }
|
||
+ p.anchor(n, p.event.anchor)
|
||
+ p.expect(yaml_MAPPING_START_EVENT)
|
||
+ for p.peek() != yaml_MAPPING_END_EVENT {
|
||
+ k := p.parseChild(n)
|
||
+ if block && k.FootComment != "" {
|
||
+ // Must be a foot comment for the prior value when being dedented.
|
||
+ if len(n.Content) > 2 {
|
||
+ n.Content[len(n.Content)-3].FootComment = k.FootComment
|
||
+ k.FootComment = ""
|
||
+ }
|
||
+ }
|
||
+ v := p.parseChild(n)
|
||
+ if k.FootComment == "" && v.FootComment != "" {
|
||
+ k.FootComment = v.FootComment
|
||
+ v.FootComment = ""
|
||
+ }
|
||
+ if p.peek() == yaml_TAIL_COMMENT_EVENT {
|
||
+ if k.FootComment == "" {
|
||
+ k.FootComment = string(p.event.foot_comment)
|
||
+ }
|
||
+ p.expect(yaml_TAIL_COMMENT_EVENT)
|
||
+ }
|
||
+ }
|
||
+ n.LineComment = string(p.event.line_comment)
|
||
+ n.FootComment = string(p.event.foot_comment)
|
||
+ if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
|
||
+ n.Content[len(n.Content)-2].FootComment = n.FootComment
|
||
+ n.FootComment = ""
|
||
+ }
|
||
+ p.expect(yaml_MAPPING_END_EVENT)
|
||
+ return n
|
||
+}
|
||
+
|
||
+// ----------------------------------------------------------------------------
|
||
+// Decoder, unmarshals a node into a provided value.
|
||
+
|
||
+type decoder struct {
|
||
+ doc *Node
|
||
+ aliases map[*Node]bool
|
||
+ terrors []string
|
||
+
|
||
+ stringMapType reflect.Type
|
||
+ generalMapType reflect.Type
|
||
+
|
||
+ knownFields bool
|
||
+ uniqueKeys bool
|
||
+ decodeCount int
|
||
+ aliasCount int
|
||
+ aliasDepth int
|
||
+
|
||
+ mergedFields map[interface{}]bool
|
||
+}
|
||
+
|
||
+var (
|
||
+ nodeType = reflect.TypeOf(Node{})
|
||
+ durationType = reflect.TypeOf(time.Duration(0))
|
||
+ stringMapType = reflect.TypeOf(map[string]interface{}{})
|
||
+ generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
|
||
+ ifaceType = generalMapType.Elem()
|
||
+ timeType = reflect.TypeOf(time.Time{})
|
||
+ ptrTimeType = reflect.TypeOf(&time.Time{})
|
||
+)
|
||
+
|
||
+func newDecoder() *decoder {
|
||
+ d := &decoder{
|
||
+ stringMapType: stringMapType,
|
||
+ generalMapType: generalMapType,
|
||
+ uniqueKeys: true,
|
||
+ }
|
||
+ d.aliases = make(map[*Node]bool)
|
||
+ return d
|
||
+}
|
||
+
|
||
+func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
|
||
+ if n.Tag != "" {
|
||
+ tag = n.Tag
|
||
+ }
|
||
+ value := n.Value
|
||
+ if tag != seqTag && tag != mapTag {
|
||
+ if len(value) > 10 {
|
||
+ value = " `" + value[:7] + "...`"
|
||
+ } else {
|
||
+ value = " `" + value + "`"
|
||
+ }
|
||
+ }
|
||
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
|
||
+}
|
||
+
|
||
+func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
|
||
+ err := u.UnmarshalYAML(n)
|
||
+ if e, ok := err.(*TypeError); ok {
|
||
+ d.terrors = append(d.terrors, e.Errors...)
|
||
+ return false
|
||
+ }
|
||
+ if err != nil {
|
||
+ fail(err)
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
|
||
+ terrlen := len(d.terrors)
|
||
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
|
||
+ defer handleErr(&err)
|
||
+ d.unmarshal(n, reflect.ValueOf(v))
|
||
+ if len(d.terrors) > terrlen {
|
||
+ issues := d.terrors[terrlen:]
|
||
+ d.terrors = d.terrors[:terrlen]
|
||
+ return &TypeError{issues}
|
||
+ }
|
||
+ return nil
|
||
+ })
|
||
+ if e, ok := err.(*TypeError); ok {
|
||
+ d.terrors = append(d.terrors, e.Errors...)
|
||
+ return false
|
||
+ }
|
||
+ if err != nil {
|
||
+ fail(err)
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
|
||
+// if a value is found to implement it.
|
||
+// It returns the initialized and dereferenced out value, whether
|
||
+// unmarshalling was already done by UnmarshalYAML, and if so whether
|
||
+// its types unmarshalled appropriately.
|
||
+//
|
||
+// If n holds a null value, prepare returns before doing anything.
|
||
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
|
||
+ if n.ShortTag() == nullTag {
|
||
+ return out, false, false
|
||
+ }
|
||
+ again := true
|
||
+ for again {
|
||
+ again = false
|
||
+ if out.Kind() == reflect.Ptr {
|
||
+ if out.IsNil() {
|
||
+ out.Set(reflect.New(out.Type().Elem()))
|
||
+ }
|
||
+ out = out.Elem()
|
||
+ again = true
|
||
+ }
|
||
+ if out.CanAddr() {
|
||
+ outi := out.Addr().Interface()
|
||
+ if u, ok := outi.(Unmarshaler); ok {
|
||
+ good = d.callUnmarshaler(n, u)
|
||
+ return out, true, good
|
||
+ }
|
||
+ if u, ok := outi.(obsoleteUnmarshaler); ok {
|
||
+ good = d.callObsoleteUnmarshaler(n, u)
|
||
+ return out, true, good
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ return out, false, false
|
||
+}
|
||
+
|
||
+func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
|
||
+ if n.ShortTag() == nullTag {
|
||
+ return reflect.Value{}
|
||
+ }
|
||
+ for _, num := range index {
|
||
+ for {
|
||
+ if v.Kind() == reflect.Ptr {
|
||
+ if v.IsNil() {
|
||
+ v.Set(reflect.New(v.Type().Elem()))
|
||
+ }
|
||
+ v = v.Elem()
|
||
+ continue
|
||
+ }
|
||
+ break
|
||
+ }
|
||
+ v = v.Field(num)
|
||
+ }
|
||
+ return v
|
||
+}
|
||
+
|
||
+const (
|
||
+ // 400,000 decode operations is ~500kb of dense object declarations, or
|
||
+ // ~5kb of dense object declarations with 10000% alias expansion
|
||
+ alias_ratio_range_low = 400000
|
||
+
|
||
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
|
||
+ // ~4.5MB of dense object declarations with 10% alias expansion
|
||
+ alias_ratio_range_high = 4000000
|
||
+
|
||
+ // alias_ratio_range is the range over which we scale allowed alias ratios
|
||
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
|
||
+)
|
||
+
|
||
+func allowedAliasRatio(decodeCount int) float64 {
|
||
+ switch {
|
||
+ case decodeCount <= alias_ratio_range_low:
|
||
+ // allow 99% to come from alias expansion for small-to-medium documents
|
||
+ return 0.99
|
||
+ case decodeCount >= alias_ratio_range_high:
|
||
+ // allow 10% to come from alias expansion for very large documents
|
||
+ return 0.10
|
||
+ default:
|
||
+ // scale smoothly from 99% down to 10% over the range.
|
||
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
|
||
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
|
||
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
|
||
+ }
|
||
+}
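The comments above pin the endpoints of the scale; to make the interpolation in the default branch concrete, a quick standalone check (illustrative, not vendored code) evaluates it at the midpoint of the range:

```Go
package main

import "fmt"

func main() {
    const low, high = 400000, 4000000
    decodeCount := (low + high) / 2 // 2,200,000 decode operations

    // Same formula as allowedAliasRatio's default branch: scale linearly
    // from 0.99 at the low bound down to 0.10 at the high bound.
    ratio := 0.99 - 0.89*(float64(decodeCount-low)/float64(high-low))
    fmt.Printf("allowed alias ratio: %.3f\n", ratio) // 0.99 - 0.89*0.5 = 0.545
}
```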
|
||
+
|
||
+func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
+	d.decodeCount++
+	if d.aliasDepth > 0 {
+		d.aliasCount++
+	}
+	if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+		failf("document contains excessive aliasing")
+	}
+	if out.Type() == nodeType {
+		out.Set(reflect.ValueOf(n).Elem())
+		return true
+	}
+	switch n.Kind {
+	case DocumentNode:
+		return d.document(n, out)
+	case AliasNode:
+		return d.alias(n, out)
+	}
+	out, unmarshaled, good := d.prepare(n, out)
+	if unmarshaled {
+		return good
+	}
+	switch n.Kind {
+	case ScalarNode:
+		good = d.scalar(n, out)
+	case MappingNode:
+		good = d.mapping(n, out)
+	case SequenceNode:
+		good = d.sequence(n, out)
+	case 0:
+		if n.IsZero() {
+			return d.null(out)
+		}
+		fallthrough
+	default:
+		failf("cannot decode node with unknown kind %d", n.Kind)
+	}
+	return good
+}
+
+func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
+	if len(n.Content) == 1 {
+		d.doc = n
+		d.unmarshal(n.Content[0], out)
+		return true
+	}
+	return false
+}
+
+func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
+	if d.aliases[n] {
+		// TODO this could actually be allowed in some circumstances.
+		failf("anchor '%s' value contains itself", n.Value)
+	}
+	d.aliases[n] = true
+	d.aliasDepth++
+	good = d.unmarshal(n.Alias, out)
+	d.aliasDepth--
+	delete(d.aliases, n)
+	return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+	for _, k := range out.MapKeys() {
+		out.SetMapIndex(k, zeroValue)
+	}
+}
+
+func (d *decoder) null(out reflect.Value) bool {
+	if out.CanAddr() {
+		switch out.Kind() {
+		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+			out.Set(reflect.Zero(out.Type()))
+			return true
+		}
+	}
+	return false
+}
+
+func (d *decoder) scalar(n *Node, out reflect.Value) bool {
+	var tag string
+	var resolved interface{}
+	if n.indicatedString() {
+		tag = strTag
+		resolved = n.Value
+	} else {
+		tag, resolved = resolve(n.Tag, n.Value)
+		if tag == binaryTag {
+			data, err := base64.StdEncoding.DecodeString(resolved.(string))
+			if err != nil {
+				failf("!!binary value contains invalid base64 data")
+			}
+			resolved = string(data)
+		}
+	}
+	if resolved == nil {
+		return d.null(out)
+	}
+	if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+		// We've resolved to exactly the type we want, so use that.
+		out.Set(resolvedv)
+		return true
+	}
+	// Perhaps we can use the value as a TextUnmarshaler to
+	// set its value.
+	if out.CanAddr() {
+		u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+		if ok {
+			var text []byte
+			if tag == binaryTag {
+				text = []byte(resolved.(string))
+			} else {
+				// We let any value be unmarshaled into TextUnmarshaler.
+				// That might be more lax than we'd like, but the
+				// TextUnmarshaler itself should bowl out any dubious values.
+				text = []byte(n.Value)
+			}
+			err := u.UnmarshalText(text)
+			if err != nil {
+				fail(err)
+			}
+			return true
+		}
+	}
+	switch out.Kind() {
+	case reflect.String:
+		if tag == binaryTag {
+			out.SetString(resolved.(string))
+			return true
+		}
+		out.SetString(n.Value)
+		return true
+	case reflect.Interface:
+		out.Set(reflect.ValueOf(resolved))
+		return true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		// This used to work in v2, but it's very unfriendly.
+		isDuration := out.Type() == durationType
+
+		switch resolved := resolved.(type) {
+		case int:
+			if !isDuration && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case int64:
+			if !isDuration && !out.OverflowInt(resolved) {
+				out.SetInt(resolved)
+				return true
+			}
+		case uint64:
+			if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case float64:
+			if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					return true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case uint64:
+			if !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			return true
+		case string:
+			// This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
+			// It only works if explicitly attempting to unmarshal into a typed bool value.
+			switch resolved {
+			case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
+				out.SetBool(true)
+				return true
+			case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+				out.SetBool(false)
+				return true
+			}
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			return true
+		case int64:
+			out.SetFloat(float64(resolved))
+			return true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			return true
+		case float64:
+			out.SetFloat(resolved)
+			return true
+		}
+	case reflect.Struct:
+		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+			out.Set(resolvedv)
+			return true
+		}
+	case reflect.Ptr:
+		panic("yaml internal error: please report the issue")
+	}
+	d.terror(n, tag, out)
+	return false
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
+	l := len(n.Content)
+
+	var iface reflect.Value
+	switch out.Kind() {
+	case reflect.Slice:
+		out.Set(reflect.MakeSlice(out.Type(), l, l))
+	case reflect.Array:
+		if l != out.Len() {
+			failf("invalid array: want %d elements but got %d", out.Len(), l)
+		}
+	case reflect.Interface:
+		// No type hints. Will have to use a generic sequence.
+		iface = out
+		out = settableValueOf(make([]interface{}, l))
+	default:
+		d.terror(n, seqTag, out)
+		return false
+	}
+	et := out.Type().Elem()
+
+	j := 0
+	for i := 0; i < l; i++ {
+		e := reflect.New(et).Elem()
+		if ok := d.unmarshal(n.Content[i], e); ok {
+			out.Index(j).Set(e)
+			j++
+		}
+	}
+	if out.Kind() != reflect.Array {
+		out.Set(out.Slice(0, j))
+	}
+	if iface.IsValid() {
+		iface.Set(out)
+	}
+	return true
+}
+
+func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
+	l := len(n.Content)
+	if d.uniqueKeys {
+		nerrs := len(d.terrors)
+		for i := 0; i < l; i += 2 {
+			ni := n.Content[i]
+			for j := i + 2; j < l; j += 2 {
+				nj := n.Content[j]
+				if ni.Kind == nj.Kind && ni.Value == nj.Value {
+					d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
+				}
+			}
+		}
+		if len(d.terrors) > nerrs {
+			return false
+		}
+	}
+	switch out.Kind() {
+	case reflect.Struct:
+		return d.mappingStruct(n, out)
+	case reflect.Map:
+		// okay
+	case reflect.Interface:
+		iface := out
+		if isStringMap(n) {
+			out = reflect.MakeMap(d.stringMapType)
+		} else {
+			out = reflect.MakeMap(d.generalMapType)
+		}
+		iface.Set(out)
+	default:
+		d.terror(n, mapTag, out)
+		return false
+	}
+
+	outt := out.Type()
+	kt := outt.Key()
+	et := outt.Elem()
+
+	stringMapType := d.stringMapType
+	generalMapType := d.generalMapType
+	if outt.Elem() == ifaceType {
+		if outt.Key().Kind() == reflect.String {
+			d.stringMapType = outt
+		} else if outt.Key() == ifaceType {
+			d.generalMapType = outt
+		}
+	}
+
+	mergedFields := d.mergedFields
+	d.mergedFields = nil
+
+	var mergeNode *Node
+
+	mapIsNew := false
+	if out.IsNil() {
+		out.Set(reflect.MakeMap(outt))
+		mapIsNew = true
+	}
+	for i := 0; i < l; i += 2 {
+		if isMerge(n.Content[i]) {
+			mergeNode = n.Content[i+1]
+			continue
+		}
+		k := reflect.New(kt).Elem()
+		if d.unmarshal(n.Content[i], k) {
+			if mergedFields != nil {
+				ki := k.Interface()
+				if mergedFields[ki] {
+					continue
+				}
+				mergedFields[ki] = true
+			}
+			kkind := k.Kind()
+			if kkind == reflect.Interface {
+				kkind = k.Elem().Kind()
+			}
+			if kkind == reflect.Map || kkind == reflect.Slice {
+				failf("invalid map key: %#v", k.Interface())
+			}
+			e := reflect.New(et).Elem()
+			if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) {
+				out.SetMapIndex(k, e)
+			}
+		}
+	}
+
+	d.mergedFields = mergedFields
+	if mergeNode != nil {
+		d.merge(n, mergeNode, out)
+	}
+
+	d.stringMapType = stringMapType
+	d.generalMapType = generalMapType
+	return true
+}
+
+func isStringMap(n *Node) bool {
+	if n.Kind != MappingNode {
+		return false
+	}
+	l := len(n.Content)
+	for i := 0; i < l; i += 2 {
+		shortTag := n.Content[i].ShortTag()
+		if shortTag != strTag && shortTag != mergeTag {
+			return false
+		}
+	}
+	return true
+}
+
+func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
+	sinfo, err := getStructInfo(out.Type())
+	if err != nil {
+		panic(err)
+	}
+
+	var inlineMap reflect.Value
+	var elemType reflect.Type
+	if sinfo.InlineMap != -1 {
+		inlineMap = out.Field(sinfo.InlineMap)
+		elemType = inlineMap.Type().Elem()
+	}
+
+	for _, index := range sinfo.InlineUnmarshalers {
+		field := d.fieldByIndex(n, out, index)
+		d.prepare(n, field)
+	}
+
+	mergedFields := d.mergedFields
+	d.mergedFields = nil
+	var mergeNode *Node
+	var doneFields []bool
+	if d.uniqueKeys {
+		doneFields = make([]bool, len(sinfo.FieldsList))
+	}
+	name := settableValueOf("")
+	l := len(n.Content)
+	for i := 0; i < l; i += 2 {
+		ni := n.Content[i]
+		if isMerge(ni) {
+			mergeNode = n.Content[i+1]
+			continue
+		}
+		if !d.unmarshal(ni, name) {
+			continue
+		}
+		sname := name.String()
+		if mergedFields != nil {
+			if mergedFields[sname] {
+				continue
+			}
+			mergedFields[sname] = true
+		}
+		if info, ok := sinfo.FieldsMap[sname]; ok {
+			if d.uniqueKeys {
+				if doneFields[info.Id] {
+					d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
+					continue
+				}
+				doneFields[info.Id] = true
+			}
+			var field reflect.Value
+			if info.Inline == nil {
+				field = out.Field(info.Num)
+			} else {
+				field = d.fieldByIndex(n, out, info.Inline)
+			}
+			d.unmarshal(n.Content[i+1], field)
+		} else if sinfo.InlineMap != -1 {
+			if inlineMap.IsNil() {
+				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+			}
+			value := reflect.New(elemType).Elem()
+			d.unmarshal(n.Content[i+1], value)
+			inlineMap.SetMapIndex(name, value)
+		} else if d.knownFields {
+			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
+		}
+	}
+
+	d.mergedFields = mergedFields
+	if mergeNode != nil {
+		d.merge(n, mergeNode, out)
+	}
+	return true
+}
+
+func failWantMap() {
+	failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) {
+	mergedFields := d.mergedFields
+	if mergedFields == nil {
+		d.mergedFields = make(map[interface{}]bool)
+		for i := 0; i < len(parent.Content); i += 2 {
+			k := reflect.New(ifaceType).Elem()
+			if d.unmarshal(parent.Content[i], k) {
+				d.mergedFields[k.Interface()] = true
+			}
+		}
+	}
+
+	switch merge.Kind {
+	case MappingNode:
+		d.unmarshal(merge, out)
+	case AliasNode:
+		if merge.Alias != nil && merge.Alias.Kind != MappingNode {
+			failWantMap()
+		}
+		d.unmarshal(merge, out)
+	case SequenceNode:
+		for i := 0; i < len(merge.Content); i++ {
+			ni := merge.Content[i]
+			if ni.Kind == AliasNode {
+				if ni.Alias != nil && ni.Alias.Kind != MappingNode {
+					failWantMap()
+				}
+			} else if ni.Kind != MappingNode {
+				failWantMap()
+			}
+			d.unmarshal(ni, out)
+		}
+	default:
+		failWantMap()
+	}
+
+	d.mergedFields = mergedFields
+}
+
+func isMerge(n *Node) bool {
+	return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
+}
diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go
new file mode 100644
index 000000000000..0f47c9ca8add
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/emitterc.go
@@ -0,0 +1,2020 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) {
+		return yaml_emitter_flush(emitter)
+	}
+	return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.buffer[emitter.buffer_pos] = value
+	emitter.buffer_pos++
+	emitter.column++
+	return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	switch emitter.line_break {
+	case yaml_CR_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\r'
+		emitter.buffer_pos += 1
+	case yaml_LN_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\n'
+		emitter.buffer_pos += 1
+	case yaml_CRLN_BREAK:
+		emitter.buffer[emitter.buffer_pos+0] = '\r'
+		emitter.buffer[emitter.buffer_pos+1] = '\n'
+		emitter.buffer_pos += 2
+	default:
+		panic("unknown line break setting")
+	}
+	if emitter.column == 0 {
+		emitter.space_above = true
+	}
+	emitter.column = 0
+	emitter.line++
+	// [Go] Do this here and below and drop from everywhere else (see commented lines).
+	emitter.indention = true
+	return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	p := emitter.buffer_pos
+	w := width(s[*i])
+	switch w {
+	case 4:
+		emitter.buffer[p+3] = s[*i+3]
+		fallthrough
+	case 3:
+		emitter.buffer[p+2] = s[*i+2]
+		fallthrough
+	case 2:
+		emitter.buffer[p+1] = s[*i+1]
+		fallthrough
+	case 1:
+		emitter.buffer[p+0] = s[*i+0]
+	default:
+		panic("unknown character width")
+	}
+	emitter.column++
+	emitter.buffer_pos += w
+	*i += w
+	return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+	for i := 0; i < len(s); {
+		if !write(emitter, s, &i) {
+			return false
+		}
+	}
+	return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+	if s[*i] == '\n' {
+		if !put_break(emitter) {
+			return false
+		}
+		*i++
+	} else {
+		if !write(emitter, s, i) {
+			return false
+		}
+		if emitter.column == 0 {
+			emitter.space_above = true
+		}
+		emitter.column = 0
+		emitter.line++
+		// [Go] Do this here and above and drop from everywhere else (see commented lines).
+		emitter.indention = true
+	}
+	return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_EMITTER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.events = append(emitter.events, *event)
+	for !yaml_emitter_need_more_events(emitter) {
+		event := &emitter.events[emitter.events_head]
+		if !yaml_emitter_analyze_event(emitter, event) {
+			return false
+		}
+		if !yaml_emitter_state_machine(emitter, event) {
+			return false
+		}
+		yaml_event_delete(event)
+		emitter.events_head++
+	}
+	return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+//  - 1 event for DOCUMENT-START
+//  - 2 events for SEQUENCE-START
+//  - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+	if emitter.events_head == len(emitter.events) {
+		return true
+	}
+	var accumulate int
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_DOCUMENT_START_EVENT:
+		accumulate = 1
+		break
+	case yaml_SEQUENCE_START_EVENT:
+		accumulate = 2
+		break
+	case yaml_MAPPING_START_EVENT:
+		accumulate = 3
+		break
+	default:
+		return false
+	}
+	if len(emitter.events)-emitter.events_head > accumulate {
+		return false
+	}
+	var level int
+	for i := emitter.events_head; i < len(emitter.events); i++ {
+		switch emitter.events[i].typ {
+		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+			level++
+		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+			level--
+		}
+		if level == 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+		}
+	}
+
+	// [Go] Do we actually need to copy this given garbage collection
+	// and the lack of deallocating destructors?
+	tag_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(tag_copy.handle, value.handle)
+	copy(tag_copy.prefix, value.prefix)
+	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+	return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+	emitter.indents = append(emitter.indents, emitter.indent)
+	if emitter.indent < 0 {
+		if flow {
+			emitter.indent = emitter.best_indent
+		} else {
+			emitter.indent = 0
+		}
+	} else if !indentless {
+		// [Go] This was changed so that indentations are more regular.
+		if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
+			// The first indent inside a sequence will just skip the "- " indicator.
+			emitter.indent += 2
+		} else {
+			// Everything else aligns to the chosen indentation.
+			emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent)
+		}
+	}
+	return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	switch emitter.state {
+	default:
+	case yaml_EMIT_STREAM_START_STATE:
+		return yaml_emitter_emit_stream_start(emitter, event)
+
+	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, true)
+
+	case yaml_EMIT_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, false)
+
+	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+		return yaml_emitter_emit_document_content(emitter, event)
+
+	case yaml_EMIT_DOCUMENT_END_STATE:
+		return yaml_emitter_emit_document_end(emitter, event)
+
+	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
+
+	case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
+
+	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
+
+	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
+
+	case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
+
+	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
+
+	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_END_STATE:
+		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+	}
+	panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_STREAM_START_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+	}
+	if emitter.encoding == yaml_ANY_ENCODING {
+		emitter.encoding = event.encoding
+		if emitter.encoding == yaml_ANY_ENCODING {
+			emitter.encoding = yaml_UTF8_ENCODING
+		}
+	}
+	if emitter.best_indent < 2 || emitter.best_indent > 9 {
+		emitter.best_indent = 2
+	}
+	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+		emitter.best_width = 80
+	}
+	if emitter.best_width < 0 {
+		emitter.best_width = 1<<31 - 1
+	}
+	if emitter.line_break == yaml_ANY_BREAK {
+		emitter.line_break = yaml_LN_BREAK
+	}
+
+	emitter.indent = -1
+	emitter.line = 0
+	emitter.column = 0
+	emitter.whitespace = true
+	emitter.indention = true
+	emitter.space_above = true
+	emitter.foot_indent = -1
+
+	if emitter.encoding != yaml_UTF8_ENCODING {
+		if !yaml_emitter_write_bom(emitter) {
+			return false
+		}
+	}
+	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+	return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+	if event.typ == yaml_DOCUMENT_START_EVENT {
+
+		if event.version_directive != nil {
+			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(event.tag_directives); i++ {
+			tag_directive := &event.tag_directives[i]
+			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+				return false
+			}
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(default_tag_directives); i++ {
+			tag_directive := &default_tag_directives[i]
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+				return false
+			}
+		}
+
+		implicit := event.implicit
+		if !first || emitter.canonical {
+			implicit = false
+		}
+
+		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if event.version_directive != nil {
+			implicit = false
+			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if len(event.tag_directives) > 0 {
+			implicit = false
+			for i := 0; i < len(event.tag_directives); i++ {
+				tag_directive := &event.tag_directives[i]
+				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+					return false
+				}
+				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+					return false
+				}
+				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+					return false
+				}
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		if yaml_emitter_check_empty_document(emitter) {
+			implicit = false
+		}
+		if !implicit {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+				return false
+			}
+			if emitter.canonical || true {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		if len(emitter.head_comment) > 0 {
+			if !yaml_emitter_process_head_comment(emitter) {
+				return false
+			}
+			if !put_break(emitter) {
+				return false
+			}
+		}
+
+		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+		return true
+	}
+
+	if event.typ == yaml_STREAM_END_EVENT {
+		if emitter.open_ended {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_flush(emitter) {
+			return false
+		}
+		emitter.state = yaml_EMIT_END_STATE
+		return true
+	}
+
+	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_emit_node(emitter, event, true, false, false, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_DOCUMENT_END_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+	}
+	// [Go] Force document foot separation.
+	emitter.foot_indent = 0
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	emitter.foot_indent = -1
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !event.implicit {
+		// [Go] Allocate the slice elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	if !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+	emitter.tag_directives = emitter.tag_directives[:0]
+	return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		if emitter.canonical && !first && !trail {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+		}
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.column == 0 || emitter.canonical && !first {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+			return false
+		}
+		if !yaml_emitter_process_line_comment(emitter) {
+			return false
+		}
+		if !yaml_emitter_process_foot_comment(emitter) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+
+		return true
+	}
+
+	if !first && !trail {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if emitter.column == 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
+	} else {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+	}
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_MAPPING_END_EVENT {
+		if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+		}
+		if !yaml_emitter_process_head_comment(emitter) {
+			return false
+		}
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+			return false
+		}
+		if !yaml_emitter_process_line_comment(emitter) {
+			return false
+		}
+		if !yaml_emitter_process_foot_comment(emitter) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+
+	if !first && !trail {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+
+	if emitter.column == 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if emitter.canonical || emitter.column > emitter.best_width {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+			return false
+		}
+	}
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
+	} else {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+	}
+	if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+		return false
+	}
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if len(emitter.line_comment) > 0 {
+		// [Go] A line comment was provided for the key. That's unusual as the
+		// scanner associates line comments with the value. Either way,
+		// save the line comment and render it appropriately later.
+		emitter.key_line_comment = emitter.line_comment
+		emitter.line_comment = nil
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+			return false
+		}
+	}
+	if len(emitter.key_line_comment) > 0 {
+		// [Go] Line comments are generally associated with the value, but when there's
+		// no value on the same line as a mapping key they end up attached to the
+		// key itself.
+		if event.typ == yaml_SCALAR_EVENT {
+			if len(emitter.line_comment) == 0 {
+				// A scalar is coming and it has no line comments by itself yet,
+				// so just let it handle the line comment as usual. If it has a
+				// line comment, we can't have both so the one from the key is lost.
+				emitter.line_comment = emitter.key_line_comment
+				emitter.key_line_comment = nil
+			}
+		} else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) {
+			// An indented block follows, so write the comment right now.
+			emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+			if !yaml_emitter_process_line_comment(emitter) {
+				return false
+			}
+			emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+	if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+	root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+	emitter.root_context = root
+	emitter.sequence_context = sequence
+	emitter.mapping_context = mapping
+	emitter.simple_key_context = simple_key
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		return yaml_emitter_emit_alias(emitter, event)
+	case yaml_SCALAR_EVENT:
+		return yaml_emitter_emit_scalar(emitter, event)
+	case yaml_SEQUENCE_START_EVENT:
+		return yaml_emitter_emit_sequence_start(emitter, event)
+	case yaml_MAPPING_START_EVENT:
+		return yaml_emitter_emit_mapping_start(emitter, event)
+	default:
+		return yaml_emitter_set_emitter_error(emitter,
+			fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+	}
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_select_scalar_style(emitter, event) {
+		return false
+	}
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if !yaml_emitter_increase_indent(emitter, true, false) {
+		return false
+	}
+	if !yaml_emitter_process_scalar(emitter) {
+		return false
+	}
+	emitter.indent = emitter.indents[len(emitter.indents)-1]
+	emitter.indents = emitter.indents[:len(emitter.indents)-1]
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+		yaml_emitter_check_empty_sequence(emitter) {
+		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+	}
+	return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+		yaml_emitter_check_empty_mapping(emitter) {
+		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+	}
+	return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+	return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+	length := 0
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_ALIAS_EVENT:
+		length += len(emitter.anchor_data.anchor)
+	case yaml_SCALAR_EVENT:
+		if emitter.scalar_data.multiline {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix) +
+			len(emitter.scalar_data.value)
+	case yaml_SEQUENCE_START_EVENT:
+		if !yaml_emitter_check_empty_sequence(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	case yaml_MAPPING_START_EVENT:
+		if !yaml_emitter_check_empty_mapping(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	default:
+		return false
+	}
+	return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+	if no_tag && !event.implicit && !event.quoted_implicit {
+		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+	}
+
+	style := event.scalar_style()
+	if style == yaml_ANY_SCALAR_STYLE {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	if emitter.canonical {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	if emitter.simple_key_context && emitter.scalar_data.multiline {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	if style == yaml_PLAIN_SCALAR_STYLE {
+		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if no_tag && !event.implicit {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte{'!'}
+	}
+	emitter.scalar_data.style = style
+	return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+	c := []byte{'&'}
+	if emitter.anchor_data.alias {
+		c[0] = '*'
+	}
+	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+		return false
+	}
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+		return true
+	}
+	if len(emitter.tag_data.handle) > 0 {
+		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+			return false
+		}
+		if len(emitter.tag_data.suffix) > 0 {
+			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+				return false
+			}
+		}
+	} else {
+		// [Go] Allocate these slices elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+			return false
+		}
+		emitter.tail_comment = emitter.tail_comment[:0]
+		emitter.foot_indent = emitter.indent
+		if emitter.foot_indent < 0 {
+			emitter.foot_indent = 0
+		}
+	}
+
+	if len(emitter.head_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+		return false
+	}
+	emitter.head_comment = emitter.head_comment[:0]
+	return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.line_comment) == 0 {
+		return true
+	}
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+		return false
+	}
+	emitter.line_comment = emitter.line_comment[:0]
+	return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.foot_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+		return false
+	}
+	emitter.foot_comment = emitter.foot_comment[:0]
+	emitter.foot_indent = emitter.indent
+	if emitter.foot_indent < 0 {
+		emitter.foot_indent = 0
+	}
+	return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major != 1 || version_directive.minor != 1 {
+		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+	}
+	return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+	handle := tag_directive.handle
+	prefix := tag_directive.prefix
+	if len(handle) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+	}
+	if handle[0] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+	}
+	if handle[len(handle)-1] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+	}
+	for i := 1; i < len(handle)-1; i += width(handle[i]) {
+		if !is_alpha(handle, i) {
+			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+		}
+	}
+	if len(prefix) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+	}
+	return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+	if len(anchor) == 0 {
+		problem := "anchor value must not be empty"
+		if alias {
+			problem = "alias value must not be empty"
+		}
+		return yaml_emitter_set_emitter_error(emitter, problem)
+	}
+	for i := 0; i < len(anchor); i += width(anchor[i]) {
+		if !is_alpha(anchor, i) {
+			problem := "anchor value must contain alphanumerical characters only"
+			if alias {
+				problem = "alias value must contain alphanumerical characters only"
+			}
+			return yaml_emitter_set_emitter_error(emitter, problem)
+		}
+	}
+	emitter.anchor_data.anchor = anchor
+	emitter.anchor_data.alias = alias
+	return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+	if len(tag) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+	}
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		tag_directive := &emitter.tag_directives[i]
+		if bytes.HasPrefix(tag, tag_directive.prefix) {
+			emitter.tag_data.handle = tag_directive.handle
+			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+			return true
+		}
+	}
+	emitter.tag_data.suffix = tag
+	return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	var (
+		block_indicators   = false
+		flow_indicators    = false
+		line_breaks        = false
+		special_characters = false
+		tab_characters     = false
+
+		leading_space  = false
+		leading_break  = false
+		trailing_space = false
+		trailing_break = false
+		break_space    = false
+		space_break    = false
+
+		preceded_by_whitespace = false
+		followed_by_whitespace = false
+		previous_space         = false
+		previous_break         = false
+	)
+
+	emitter.scalar_data.value = value
+
+	if len(value) == 0 {
+		emitter.scalar_data.multiline = false
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = true
+		emitter.scalar_data.single_quoted_allowed = true
+		emitter.scalar_data.block_allowed = false
+		return true
+	}
+
+	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+		block_indicators = true
+		flow_indicators = true
+	}
+
+	preceded_by_whitespace = true
+	for i, w := 0, 0; i < len(value); i += w {
+		w = width(value[i])
+		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+		if i == 0 {
+			switch value[i] {
+			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+				flow_indicators = true
+				block_indicators = true
+			case '?', ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '-':
+				if followed_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		} else {
+			switch value[i] {
+			case ',', '?', '[', ']', '{', '}':
+				flow_indicators = true
+			case ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '#':
+				if preceded_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
|
||
+ }
|
||
+
|
||
+ if value[i] == '\t' {
|
||
+ tab_characters = true
|
||
+ } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
|
||
+ special_characters = true
|
||
+ }
|
||
+ if is_space(value, i) {
|
||
+ if i == 0 {
|
||
+ leading_space = true
|
||
+ }
|
||
+ if i+width(value[i]) == len(value) {
|
||
+ trailing_space = true
|
||
+ }
|
||
+ if previous_break {
|
||
+ break_space = true
|
||
+ }
|
||
+ previous_space = true
|
||
+ previous_break = false
|
||
+ } else if is_break(value, i) {
|
||
+ line_breaks = true
|
||
+ if i == 0 {
|
||
+ leading_break = true
|
||
+ }
|
||
+ if i+width(value[i]) == len(value) {
|
||
+ trailing_break = true
|
||
+ }
|
||
+ if previous_space {
|
||
+ space_break = true
|
||
+ }
|
||
+ previous_space = false
|
||
+ previous_break = true
|
||
+ } else {
|
||
+ previous_space = false
|
||
+ previous_break = false
|
||
+ }
|
||
+
|
||
+ // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
|
||
+ preceded_by_whitespace = is_blankz(value, i)
|
||
+ }
|
||
+
|
||
+ emitter.scalar_data.multiline = line_breaks
|
||
+ emitter.scalar_data.flow_plain_allowed = true
|
||
+ emitter.scalar_data.block_plain_allowed = true
|
||
+ emitter.scalar_data.single_quoted_allowed = true
|
||
+ emitter.scalar_data.block_allowed = true
|
||
+
|
||
+ if leading_space || leading_break || trailing_space || trailing_break {
|
||
+ emitter.scalar_data.flow_plain_allowed = false
|
||
+ emitter.scalar_data.block_plain_allowed = false
|
||
+ }
|
||
+ if trailing_space {
|
||
+ emitter.scalar_data.block_allowed = false
|
||
+ }
|
||
+ if break_space {
|
||
+ emitter.scalar_data.flow_plain_allowed = false
|
||
+ emitter.scalar_data.block_plain_allowed = false
|
||
+ emitter.scalar_data.single_quoted_allowed = false
|
||
+ }
|
||
+ if space_break || tab_characters || special_characters {
|
||
+ emitter.scalar_data.flow_plain_allowed = false
|
||
+ emitter.scalar_data.block_plain_allowed = false
|
||
+ emitter.scalar_data.single_quoted_allowed = false
|
||
+ }
|
||
+ if space_break || special_characters {
|
||
+ emitter.scalar_data.block_allowed = false
|
||
+ }
|
||
+ if line_breaks {
|
||
+ emitter.scalar_data.flow_plain_allowed = false
|
||
+ emitter.scalar_data.block_plain_allowed = false
|
||
+ }
|
||
+ if flow_indicators {
|
||
+ emitter.scalar_data.flow_plain_allowed = false
|
||
+ }
|
||
+ if block_indicators {
|
||
+ emitter.scalar_data.block_plain_allowed = false
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Check if the event data is valid.
|
||
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
|
||
+
|
||
+ emitter.anchor_data.anchor = nil
|
||
+ emitter.tag_data.handle = nil
|
||
+ emitter.tag_data.suffix = nil
|
||
+ emitter.scalar_data.value = nil
|
||
+
|
||
+ if len(event.head_comment) > 0 {
|
||
+ emitter.head_comment = event.head_comment
|
||
+ }
|
||
+ if len(event.line_comment) > 0 {
|
||
+ emitter.line_comment = event.line_comment
|
||
+ }
|
||
+ if len(event.foot_comment) > 0 {
|
||
+ emitter.foot_comment = event.foot_comment
|
||
+ }
|
||
+ if len(event.tail_comment) > 0 {
|
||
+ emitter.tail_comment = event.tail_comment
|
||
+ }
|
||
+
|
||
+ switch event.typ {
|
||
+ case yaml_ALIAS_EVENT:
|
||
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ case yaml_SCALAR_EVENT:
|
||
+ if len(event.anchor) > 0 {
|
||
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
|
||
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ case yaml_SEQUENCE_START_EVENT:
|
||
+ if len(event.anchor) > 0 {
|
||
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
|
||
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ case yaml_MAPPING_START_EVENT:
|
||
+ if len(event.anchor) > 0 {
|
||
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
|
||
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Write the BOM character.
|
||
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
|
||
+ if !flush(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ pos := emitter.buffer_pos
|
||
+ emitter.buffer[pos+0] = '\xEF'
|
||
+ emitter.buffer[pos+1] = '\xBB'
|
||
+ emitter.buffer[pos+2] = '\xBF'
|
||
+ emitter.buffer_pos += 3
|
||
+ return true
|
||
+}
|
||
+
|
||
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
|
||
+ indent := emitter.indent
|
||
+ if indent < 0 {
|
||
+ indent = 0
|
||
+ }
|
||
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
|
||
+ if !put_break(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ if emitter.foot_indent == indent {
|
||
+ if !put_break(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ for emitter.column < indent {
|
||
+ if !put(emitter, ' ') {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ emitter.whitespace = true
|
||
+ //emitter.indention = true
|
||
+ emitter.space_above = false
|
||
+ emitter.foot_indent = -1
|
||
+ return true
|
||
+}
|
||
+
|
||
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
|
||
+ if need_whitespace && !emitter.whitespace {
|
||
+ if !put(emitter, ' ') {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ if !write_all(emitter, indicator) {
|
||
+ return false
|
||
+ }
|
||
+ emitter.whitespace = is_whitespace
|
||
+ emitter.indention = (emitter.indention && is_indention)
|
||
+ emitter.open_ended = false
|
||
+ return true
|
||
+}
|
||
+
|
||
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
|
||
+ if !write_all(emitter, value) {
|
||
+ return false
|
||
+ }
|
||
+ emitter.whitespace = false
|
||
+ emitter.indention = false
|
||
+ return true
|
||
+}
|
||
+
|
||
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
|
||
+ if !emitter.whitespace {
|
||
+ if !put(emitter, ' ') {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ if !write_all(emitter, value) {
|
||
+ return false
|
||
+ }
|
||
+ emitter.whitespace = false
|
||
+ emitter.indention = false
|
||
+ return true
|
||
+}
|
||
+
|
||
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
|
||
+ if need_whitespace && !emitter.whitespace {
|
||
+ if !put(emitter, ' ') {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ for i := 0; i < len(value); {
|
||
+ var must_write bool
|
||
+ switch value[i] {
|
||
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
|
||
+ must_write = true
|
||
+ default:
|
||
+ must_write = is_alpha(value, i)
|
||
+ }
|
||
+ if must_write {
|
||
+ if !write(emitter, value, &i) {
|
||
+ return false
|
||
+ }
|
||
+ } else {
|
||
+ w := width(value[i])
|
||
+ for k := 0; k < w; k++ {
|
||
+ octet := value[i]
|
||
+ i++
|
||
+ if !put(emitter, '%') {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ c := octet >> 4
|
||
+ if c < 10 {
|
||
+ c += '0'
|
||
+ } else {
|
||
+ c += 'A' - 10
|
||
+ }
|
||
+ if !put(emitter, c) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ c = octet & 0x0f
|
||
+ if c < 10 {
|
||
+ c += '0'
|
||
+ } else {
|
||
+ c += 'A' - 10
|
||
+ }
|
||
+ if !put(emitter, c) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ emitter.whitespace = false
|
||
+ emitter.indention = false
|
||
+ return true
|
||
+}
|
||
+
|
||
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
|
||
+ if len(value) > 0 && !emitter.whitespace {
|
||
+ if !put(emitter, ' ') {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ spaces := false
|
||
+ breaks := false
|
||
+ for i := 0; i < len(value); {
|
||
+ if is_space(value, i) {
|
||
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
|
||
+ if !yaml_emitter_write_indent(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ i += width(value[i])
|
||
+ } else {
|
||
+ if !write(emitter, value, &i) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ spaces = true
|
||
+ } else if is_break(value, i) {
|
||
+ if !breaks && value[i] == '\n' {
|
||
+ if !put_break(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ if !write_break(emitter, value, &i) {
|
||
+ return false
|
||
+ }
|
||
+ //emitter.indention = true
|
||
+ breaks = true
|
||
+ } else {
|
||
+ if breaks {
|
||
+ if !yaml_emitter_write_indent(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ if !write(emitter, value, &i) {
|
||
+ return false
|
||
+ }
|
||
+ emitter.indention = false
|
||
+ spaces = false
|
||
+ breaks = false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if len(value) > 0 {
|
||
+ emitter.whitespace = false
|
||
+ }
|
||
+ emitter.indention = false
|
||
+ if emitter.root_context {
|
||
+ emitter.open_ended = true
|
||
+ }
|
||
+
|
||
+ return true
|
||
+}
|
||
+
|
||
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
|
||
+
|
||
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ spaces := false
|
||
+ breaks := false
|
||
+ for i := 0; i < len(value); {
|
||
+ if is_space(value, i) {
|
||
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
|
||
+ if !yaml_emitter_write_indent(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ i += width(value[i])
|
||
+ } else {
|
||
+ if !write(emitter, value, &i) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ spaces = true
|
||
+ } else if is_break(value, i) {
|
||
+ if !breaks && value[i] == '\n' {
|
||
+ if !put_break(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ if !write_break(emitter, value, &i) {
|
||
+ return false
|
||
+ }
|
||
+ //emitter.indention = true
|
||
+ breaks = true
|
||
+ } else {
|
||
+ if breaks {
|
||
+ if !yaml_emitter_write_indent(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ if value[i] == '\'' {
|
||
+ if !put(emitter, '\'') {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ if !write(emitter, value, &i) {
|
||
+ return false
|
||
+ }
|
||
+ emitter.indention = false
|
||
+ spaces = false
|
||
+ breaks = false
|
||
+ }
|
||
+ }
|
||
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
|
||
+ return false
|
||
+ }
|
||
+ emitter.whitespace = false
|
||
+ emitter.indention = false
|
||
+ return true
|
||
+}
|
||
+
|
||
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
|
||
+ spaces := false
|
||
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ for i := 0; i < len(value); {
|
||
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
|
||
+ is_bom(value, i) || is_break(value, i) ||
|
||
+ value[i] == '"' || value[i] == '\\' {
|
||
+
|
||
+ octet := value[i]
|
||
+
|
||
+ var w int
|
||
+ var v rune
|
||
+ switch {
|
||
+ case octet&0x80 == 0x00:
|
||
+ w, v = 1, rune(octet&0x7F)
|
||
+ case octet&0xE0 == 0xC0:
|
||
+ w, v = 2, rune(octet&0x1F)
|
||
+ case octet&0xF0 == 0xE0:
|
||
+ w, v = 3, rune(octet&0x0F)
|
||
+ case octet&0xF8 == 0xF0:
|
||
+ w, v = 4, rune(octet&0x07)
|
||
+ }
|
||
+ for k := 1; k < w; k++ {
|
||
+ octet = value[i+k]
|
||
+ v = (v << 6) + (rune(octet) & 0x3F)
|
||
+ }
|
||
+ i += w
|
||
+
|
||
+ if !put(emitter, '\\') {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ var ok bool
|
||
+ switch v {
|
||
+ case 0x00:
|
||
+ ok = put(emitter, '0')
|
||
+ case 0x07:
|
||
+ ok = put(emitter, 'a')
|
||
+ case 0x08:
|
||
+ ok = put(emitter, 'b')
|
||
+ case 0x09:
|
||
+ ok = put(emitter, 't')
|
||
+ case 0x0A:
|
||
+ ok = put(emitter, 'n')
|
||
+ case 0x0b:
|
||
+ ok = put(emitter, 'v')
|
||
+ case 0x0c:
|
||
+ ok = put(emitter, 'f')
|
||
+ case 0x0d:
|
||
+ ok = put(emitter, 'r')
|
||
+ case 0x1b:
|
||
+ ok = put(emitter, 'e')
|
||
+ case 0x22:
|
||
+ ok = put(emitter, '"')
|
||
+ case 0x5c:
|
||
+ ok = put(emitter, '\\')
|
||
+ case 0x85:
|
||
+ ok = put(emitter, 'N')
|
||
+ case 0xA0:
|
||
+ ok = put(emitter, '_')
|
||
+ case 0x2028:
|
||
+ ok = put(emitter, 'L')
|
||
+ case 0x2029:
|
||
+ ok = put(emitter, 'P')
|
||
+ default:
|
||
+ if v <= 0xFF {
|
||
+ ok = put(emitter, 'x')
|
||
+ w = 2
|
||
+ } else if v <= 0xFFFF {
|
||
+ ok = put(emitter, 'u')
|
||
+ w = 4
|
||
+ } else {
|
||
+ ok = put(emitter, 'U')
|
||
+ w = 8
|
||
+ }
|
||
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
|
||
+ digit := byte((v >> uint(k)) & 0x0F)
|
||
+ if digit < 10 {
|
||
+ ok = put(emitter, digit+'0')
|
||
+ } else {
|
||
+ ok = put(emitter, digit+'A'-10)
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ if !ok {
|
||
+ return false
|
||
+ }
|
||
+ spaces = false
|
||
+ } else if is_space(value, i) {
|
||
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
|
||
+ if !yaml_emitter_write_indent(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ if is_space(value, i+1) {
|
||
+ if !put(emitter, '\\') {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ i += width(value[i])
|
||
+ } else if !write(emitter, value, &i) {
|
||
+ return false
|
||
+ }
|
||
+ spaces = true
|
||
+ } else {
|
||
+ if !write(emitter, value, &i) {
|
||
+ return false
|
||
+ }
|
||
+ spaces = false
|
||
+ }
|
||
+ }
|
||
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
|
||
+ return false
|
||
+ }
|
||
+ emitter.whitespace = false
|
||
+ emitter.indention = false
|
||
+ return true
|
||
+}
|
||
+
|
||
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
|
||
+ if is_space(value, 0) || is_break(value, 0) {
|
||
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
|
||
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ emitter.open_ended = false
|
||
+
|
||
+ var chomp_hint [1]byte
|
||
+ if len(value) == 0 {
|
||
+ chomp_hint[0] = '-'
|
||
+ } else {
|
||
+ i := len(value) - 1
|
||
+ for value[i]&0xC0 == 0x80 {
|
||
+ i--
|
||
+ }
|
||
+ if !is_break(value, i) {
|
||
+ chomp_hint[0] = '-'
|
||
+ } else if i == 0 {
|
||
+ chomp_hint[0] = '+'
|
||
+ emitter.open_ended = true
|
||
+ } else {
|
||
+ i--
|
||
+ for value[i]&0xC0 == 0x80 {
|
||
+ i--
|
||
+ }
|
||
+ if is_break(value, i) {
|
||
+ chomp_hint[0] = '+'
|
||
+ emitter.open_ended = true
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ if chomp_hint[0] != 0 {
|
||
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
|
||
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
|
||
+ return false
|
||
+ }
|
||
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
|
||
+ return false
|
||
+ }
|
||
+ if !yaml_emitter_process_line_comment(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ //emitter.indention = true
|
||
+ emitter.whitespace = true
|
||
+ breaks := true
|
||
+ for i := 0; i < len(value); {
|
||
+ if is_break(value, i) {
|
||
+ if !write_break(emitter, value, &i) {
|
||
+ return false
|
||
+ }
|
||
+ //emitter.indention = true
|
||
+ breaks = true
|
||
+ } else {
|
||
+ if breaks {
|
||
+ if !yaml_emitter_write_indent(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ if !write(emitter, value, &i) {
|
||
+ return false
|
||
+ }
|
||
+ emitter.indention = false
|
||
+ breaks = false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ return true
|
||
+}
|
||
+
|
||
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
|
||
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
|
||
+ return false
|
||
+ }
|
||
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
|
||
+ return false
|
||
+ }
|
||
+ if !yaml_emitter_process_line_comment(emitter) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ //emitter.indention = true
|
||
+ emitter.whitespace = true
|
||
+
|
||
+ breaks := true
|
||
+ leading_spaces := true
|
||
+ for i := 0; i < len(value); {
|
||
+ if is_break(value, i) {
|
||
+ if !breaks && !leading_spaces && value[i] == '\n' {
|
||
+ k := 0
|
||
+ for is_break(value, k) {
|
||
+ k += width(value[k])
|
||
+ }
|
||
+ if !is_blankz(value, k) {
|
||
+ if !put_break(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ if !write_break(emitter, value, &i) {
|
||
+ return false
|
||
+ }
|
||
+ //emitter.indention = true
|
||
+ breaks = true
|
||
+ } else {
|
||
+ if breaks {
|
||
+ if !yaml_emitter_write_indent(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ leading_spaces = is_blank(value, i)
|
||
+ }
|
||
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
|
||
+ if !yaml_emitter_write_indent(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ i += width(value[i])
|
||
+ } else {
|
||
+ if !write(emitter, value, &i) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ emitter.indention = false
|
||
+ breaks = false
|
||
+ }
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool {
|
||
+ breaks := false
|
||
+ pound := false
|
||
+ for i := 0; i < len(comment); {
|
||
+ if is_break(comment, i) {
|
||
+ if !write_break(emitter, comment, &i) {
|
||
+ return false
|
||
+ }
|
||
+ //emitter.indention = true
|
||
+ breaks = true
|
||
+ pound = false
|
||
+ } else {
|
||
+ if breaks && !yaml_emitter_write_indent(emitter) {
|
||
+ return false
|
||
+ }
|
||
+ if !pound {
|
||
+ if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) {
|
||
+ return false
|
||
+ }
|
||
+ pound = true
|
||
+ }
|
||
+ if !write(emitter, comment, &i) {
|
||
+ return false
|
||
+ }
|
||
+ emitter.indention = false
|
||
+ breaks = false
|
||
+ }
|
||
+ }
|
||
+ if !breaks && !put_break(emitter) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ emitter.whitespace = true
|
||
+ //emitter.indention = true
|
||
+ return true
|
||
+}
|
||
diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go
|
||
new file mode 100644
|
||
index 000000000000..de9e72a3e638
|
||
--- /dev/null
|
||
+++ b/vendor/gopkg.in/yaml.v3/encode.go
|
||
@@ -0,0 +1,577 @@
|
||
+//
|
||
+// Copyright (c) 2011-2019 Canonical Ltd
|
||
+//
|
||
+// Licensed under the Apache License, Version 2.0 (the "License");
|
||
+// you may not use this file except in compliance with the License.
|
||
+// You may obtain a copy of the License at
|
||
+//
|
||
+// http://www.apache.org/licenses/LICENSE-2.0
|
||
+//
|
||
+// Unless required by applicable law or agreed to in writing, software
|
||
+// distributed under the License is distributed on an "AS IS" BASIS,
|
||
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+// See the License for the specific language governing permissions and
|
||
+// limitations under the License.
|
||
+
|
||
+package yaml
|
||
+
|
||
+import (
|
||
+ "encoding"
|
||
+ "fmt"
|
||
+ "io"
|
||
+ "reflect"
|
||
+ "regexp"
|
||
+ "sort"
|
||
+ "strconv"
|
||
+ "strings"
|
||
+ "time"
|
||
+ "unicode/utf8"
|
||
+)
|
||
+
|
||
+type encoder struct {
|
||
+ emitter yaml_emitter_t
|
||
+ event yaml_event_t
|
||
+ out []byte
|
||
+ flow bool
|
||
+ indent int
|
||
+ doneInit bool
|
||
+}
|
||
+
|
||
+func newEncoder() *encoder {
|
||
+ e := &encoder{}
|
||
+ yaml_emitter_initialize(&e.emitter)
|
||
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
|
||
+ yaml_emitter_set_unicode(&e.emitter, true)
|
||
+ return e
|
||
+}
|
||
+
|
||
+func newEncoderWithWriter(w io.Writer) *encoder {
|
||
+ e := &encoder{}
|
||
+ yaml_emitter_initialize(&e.emitter)
|
||
+ yaml_emitter_set_output_writer(&e.emitter, w)
|
||
+ yaml_emitter_set_unicode(&e.emitter, true)
|
||
+ return e
|
||
+}
|
||
+
|
||
+func (e *encoder) init() {
|
||
+ if e.doneInit {
|
||
+ return
|
||
+ }
|
||
+ if e.indent == 0 {
|
||
+ e.indent = 4
|
||
+ }
|
||
+ e.emitter.best_indent = e.indent
|
||
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
|
||
+ e.emit()
|
||
+ e.doneInit = true
|
||
+}
|
||
+
|
||
+func (e *encoder) finish() {
|
||
+ e.emitter.open_ended = false
|
||
+ yaml_stream_end_event_initialize(&e.event)
|
||
+ e.emit()
|
||
+}
|
||
+
|
||
+func (e *encoder) destroy() {
|
||
+ yaml_emitter_delete(&e.emitter)
|
||
+}
|
||
+
|
||
+func (e *encoder) emit() {
|
||
+ // This will internally delete the e.event value.
|
||
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
|
||
+}
|
||
+
|
||
+func (e *encoder) must(ok bool) {
|
||
+ if !ok {
|
||
+ msg := e.emitter.problem
|
||
+ if msg == "" {
|
||
+ msg = "unknown problem generating YAML content"
|
||
+ }
|
||
+ failf("%s", msg)
|
||
+ }
|
||
+}
|
||
+
|
||
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
|
||
+ e.init()
|
||
+ var node *Node
|
||
+ if in.IsValid() {
|
||
+ node, _ = in.Interface().(*Node)
|
||
+ }
|
||
+ if node != nil && node.Kind == DocumentNode {
|
||
+ e.nodev(in)
|
||
+ } else {
|
||
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
|
||
+ e.emit()
|
||
+ e.marshal(tag, in)
|
||
+ yaml_document_end_event_initialize(&e.event, true)
|
||
+ e.emit()
|
||
+ }
|
||
+}
|
||
+
|
||
+func (e *encoder) marshal(tag string, in reflect.Value) {
|
||
+ tag = shortTag(tag)
|
||
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
|
||
+ e.nilv()
|
||
+ return
|
||
+ }
|
||
+ iface := in.Interface()
|
||
+ switch value := iface.(type) {
|
||
+ case *Node:
|
||
+ e.nodev(in)
|
||
+ return
|
||
+ case Node:
|
||
+ if !in.CanAddr() {
|
||
+ var n = reflect.New(in.Type()).Elem()
|
||
+ n.Set(in)
|
||
+ in = n
|
||
+ }
|
||
+ e.nodev(in.Addr())
|
||
+ return
|
||
+ case time.Time:
|
||
+ e.timev(tag, in)
|
||
+ return
|
||
+ case *time.Time:
|
||
+ e.timev(tag, in.Elem())
|
||
+ return
|
||
+ case time.Duration:
|
||
+ e.stringv(tag, reflect.ValueOf(value.String()))
|
||
+ return
|
||
+ case Marshaler:
|
||
+ v, err := value.MarshalYAML()
|
||
+ if err != nil {
|
||
+ fail(err)
|
||
+ }
|
||
+ if v == nil {
|
||
+ e.nilv()
|
||
+ return
|
||
+ }
|
||
+ e.marshal(tag, reflect.ValueOf(v))
|
||
+ return
|
||
+ case encoding.TextMarshaler:
|
||
+ text, err := value.MarshalText()
|
||
+ if err != nil {
|
||
+ fail(err)
|
||
+ }
|
||
+ in = reflect.ValueOf(string(text))
|
||
+ case nil:
|
||
+ e.nilv()
|
||
+ return
|
||
+ }
|
||
+ switch in.Kind() {
|
||
+ case reflect.Interface:
|
||
+ e.marshal(tag, in.Elem())
|
||
+ case reflect.Map:
|
||
+ e.mapv(tag, in)
|
||
+ case reflect.Ptr:
|
||
+ e.marshal(tag, in.Elem())
|
||
+ case reflect.Struct:
|
||
+ e.structv(tag, in)
|
||
+ case reflect.Slice, reflect.Array:
|
||
+ e.slicev(tag, in)
|
||
+ case reflect.String:
|
||
+ e.stringv(tag, in)
|
||
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||
+ e.intv(tag, in)
|
||
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||
+ e.uintv(tag, in)
|
||
+ case reflect.Float32, reflect.Float64:
|
||
+ e.floatv(tag, in)
|
||
+ case reflect.Bool:
|
||
+ e.boolv(tag, in)
|
||
+ default:
|
||
+ panic("cannot marshal type: " + in.Type().String())
|
||
+ }
|
||
+}
|
||
+
|
||
+func (e *encoder) mapv(tag string, in reflect.Value) {
|
||
+ e.mappingv(tag, func() {
|
||
+ keys := keyList(in.MapKeys())
|
||
+ sort.Sort(keys)
|
||
+ for _, k := range keys {
|
||
+ e.marshal("", k)
|
||
+ e.marshal("", in.MapIndex(k))
|
||
+ }
|
||
+ })
|
||
+}
|
||
+
|
||
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
|
||
+ for _, num := range index {
|
||
+ for {
|
||
+ if v.Kind() == reflect.Ptr {
|
||
+ if v.IsNil() {
|
||
+ return reflect.Value{}
|
||
+ }
|
||
+ v = v.Elem()
|
||
+ continue
|
||
+ }
|
||
+ break
|
||
+ }
|
||
+ v = v.Field(num)
|
||
+ }
|
||
+ return v
|
||
+}
|
||
+
|
||
+func (e *encoder) structv(tag string, in reflect.Value) {
|
||
+ sinfo, err := getStructInfo(in.Type())
|
||
+ if err != nil {
|
||
+ panic(err)
|
||
+ }
|
||
+ e.mappingv(tag, func() {
|
||
+ for _, info := range sinfo.FieldsList {
|
||
+ var value reflect.Value
|
||
+ if info.Inline == nil {
|
||
+ value = in.Field(info.Num)
|
||
+ } else {
|
||
+ value = e.fieldByIndex(in, info.Inline)
|
||
+ if !value.IsValid() {
|
||
+ continue
|
||
+ }
|
||
+ }
|
||
+ if info.OmitEmpty && isZero(value) {
|
||
+ continue
|
||
+ }
|
||
+ e.marshal("", reflect.ValueOf(info.Key))
|
||
+ e.flow = info.Flow
|
||
+ e.marshal("", value)
|
||
+ }
|
||
+ if sinfo.InlineMap >= 0 {
|
||
+ m := in.Field(sinfo.InlineMap)
|
||
+ if m.Len() > 0 {
|
||
+ e.flow = false
|
||
+ keys := keyList(m.MapKeys())
|
||
+ sort.Sort(keys)
|
||
+ for _, k := range keys {
|
||
+ if _, found := sinfo.FieldsMap[k.String()]; found {
|
||
+ panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
|
||
+ }
|
||
+ e.marshal("", k)
|
||
+ e.flow = false
|
||
+ e.marshal("", m.MapIndex(k))
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ })
|
||
+}
|
||
+
|
||
+func (e *encoder) mappingv(tag string, f func()) {
|
||
+ implicit := tag == ""
|
||
+ style := yaml_BLOCK_MAPPING_STYLE
|
||
+ if e.flow {
|
||
+ e.flow = false
|
||
+ style = yaml_FLOW_MAPPING_STYLE
|
||
+ }
|
||
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
|
||
+ e.emit()
|
||
+ f()
|
||
+ yaml_mapping_end_event_initialize(&e.event)
|
||
+ e.emit()
|
||
+}
|
||
+
|
||
+func (e *encoder) slicev(tag string, in reflect.Value) {
|
||
+ implicit := tag == ""
|
||
+ style := yaml_BLOCK_SEQUENCE_STYLE
|
||
+ if e.flow {
|
||
+ e.flow = false
|
||
+ style = yaml_FLOW_SEQUENCE_STYLE
|
||
+ }
|
||
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
|
||
+ e.emit()
|
||
+ n := in.Len()
|
||
+ for i := 0; i < n; i++ {
|
||
+ e.marshal("", in.Index(i))
|
||
+ }
|
||
+ e.must(yaml_sequence_end_event_initialize(&e.event))
|
||
+ e.emit()
|
||
+}
|
||
+
|
||
+// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
|
||
+//
|
||
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
|
||
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
|
||
+// the time being for compatibility with other parsers.
|
||
+func isBase60Float(s string) (result bool) {
|
||
+ // Fast path.
|
||
+ if s == "" {
|
||
+ return false
|
||
+ }
|
||
+ c := s[0]
|
||
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
|
||
+ return false
|
||
+ }
|
||
+ // Do the full match.
|
||
+ return base60float.MatchString(s)
|
||
+}
|
||
+
|
||
+// From http://yaml.org/type/float.html, except the regular expression there
|
||
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
|
||
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
|
||
+
|
||
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
|
||
+//
|
||
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
|
||
+// rendered as quoted strings so that the marshalled output is valid for YAML 1.1
+// parsing.
+func isOldBool(s string) (result bool) {
+	switch s {
+	case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+		"n", "N", "no", "No", "NO", "off", "Off", "OFF":
+		return true
+	default:
+		return false
+	}
+}
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == binaryTag {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		}
+		if tag != "" {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+		// It can't be encoded directly as YAML so use a binary tag
+		// and encode it as base64.
+		tag = binaryTag
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+	}
+	// Note: it's possible for user code to emit invalid YAML
+	// if they explicitly specify a tag and a string containing
+	// text that's incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
+		if e.flow {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		} else {
+			style = yaml_LITERAL_SCALAR_STYLE
+		}
+	case canUsePlain:
+		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+	// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+	implicit := tag == ""
+	if !implicit {
+		tag = longTag(tag)
+	}
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.event.head_comment = head
+	e.event.line_comment = line
+	e.event.foot_comment = foot
+	e.event.tail_comment = tail
+	e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+	e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+	// Zero nodes behave as nil.
+	if node.Kind == 0 && node.IsZero() {
+		e.nilv()
+		return
+	}
+
+	// If the tag was not explicitly requested, and dropping it won't change the
+	// implicit tag of the value, don't include it in the presentation.
+	var tag = node.Tag
+	var stag = shortTag(tag)
+	var forceQuoting bool
+	if tag != "" && node.Style&TaggedStyle == 0 {
+		if node.Kind == ScalarNode {
+			if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+				tag = ""
+			} else {
+				rtag, _ := resolve("", node.Value)
+				if rtag == stag {
+					tag = ""
+				} else if stag == strTag {
+					tag = ""
+					forceQuoting = true
+				}
+			}
+		} else {
+			var rtag string
+			switch node.Kind {
+			case MappingNode:
+				rtag = mapTag
+			case SequenceNode:
+				rtag = seqTag
+			}
+			if rtag == stag {
+				tag = ""
+			}
+		}
+	}
+
+	switch node.Kind {
+	case DocumentNode:
+		yaml_document_start_event_initialize(&e.event, nil, nil, true)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		yaml_document_end_event_initialize(&e.event, true)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case SequenceNode:
+		style := yaml_BLOCK_SEQUENCE_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_SEQUENCE_STYLE
+		}
+		e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		e.must(yaml_sequence_end_event_initialize(&e.event))
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case MappingNode:
+		style := yaml_BLOCK_MAPPING_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_MAPPING_STYLE
+		}
+		yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+		e.event.tail_comment = []byte(tail)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+
+		// The tail logic below moves the foot comment of prior keys to the following key,
+		// since the value for each key may be a nested structure and the foot needs to be
+		// processed only after the entirety of the value is streamed. The last tail is processed
+		// with the mapping end event.
+		var tail string
+		for i := 0; i+1 < len(node.Content); i += 2 {
+			k := node.Content[i]
+			foot := k.FootComment
+			if foot != "" {
+				kopy := *k
+				kopy.FootComment = ""
+				k = &kopy
+			}
+			e.node(k, tail)
+			tail = foot
+
+			v := node.Content[i+1]
+			e.node(v, "")
+		}
+
+		yaml_mapping_end_event_initialize(&e.event)
+		e.event.tail_comment = []byte(tail)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case AliasNode:
+		yaml_alias_event_initialize(&e.event, []byte(node.Value))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case ScalarNode:
+		value := node.Value
+		if !utf8.ValidString(value) {
+			if stag == binaryTag {
+				failf("explicitly tagged !!binary data must be base64-encoded")
+			}
+			if stag != "" {
+				failf("cannot marshal invalid UTF-8 data as %s", stag)
+			}
+			// It can't be encoded directly as YAML so use a binary tag
+			// and encode it as base64.
+			tag = binaryTag
+			value = encodeBase64(value)
+		}
+
+		style := yaml_PLAIN_SCALAR_STYLE
+		switch {
+		case node.Style&DoubleQuotedStyle != 0:
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		case node.Style&SingleQuotedStyle != 0:
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		case node.Style&LiteralStyle != 0:
+			style = yaml_LITERAL_SCALAR_STYLE
+		case node.Style&FoldedStyle != 0:
+			style = yaml_FOLDED_SCALAR_STYLE
+		case strings.Contains(value, "\n"):
+			style = yaml_LITERAL_SCALAR_STYLE
+		case forceQuoting:
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+
+		e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
+	default:
+		failf("cannot encode node with unknown kind %d", node.Kind)
+	}
+}
diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go
new file mode 100644
index 000000000000..268558a0d632
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/parserc.go
@@ -0,0 +1,1258 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream               ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document    ::= block_node DOCUMENT-END*
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          | properties (block_content | indentless_block_sequence)?
+//                          | block_content
+//                          | indentless_block_sequence
+// block_node           ::= ALIAS
+//                          | properties block_content?
+//                          | block_content
+// flow_node            ::= ALIAS
+//                          | properties flow_content?
+//                          | flow_content
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content        ::= block_collection | flow_collection | SCALAR
+// flow_content         ::= flow_collection | SCALAR
+// block_collection     ::= block_sequence | block_mapping
+// flow_collection      ::= flow_sequence | flow_mapping
+// block_sequence       ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                          BLOCK-END
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                          flow_sequence_entry?
+//                          FLOW-SEQUENCE-END
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                          flow_mapping_entry?
+//                          FLOW-MAPPING-END
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+		token := &parser.tokens[parser.tokens_head]
+		yaml_parser_unfold_comments(parser, token)
+		return token
+	}
+	return nil
+}
+
+// yaml_parser_unfold_comments walks through the comments queue and joins all
+// comments behind the position of the provided token into the respective
+// top-level comment slices in the parser.
+func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) {
+	for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index {
+		comment := &parser.comments[parser.comments_head]
+		if len(comment.head) > 0 {
+			if token.typ == yaml_BLOCK_END_TOKEN {
+				// No heads on ends, so keep comment.head for a follow up token.
+				break
+			}
+			if len(parser.head_comment) > 0 {
+				parser.head_comment = append(parser.head_comment, '\n')
+			}
+			parser.head_comment = append(parser.head_comment, comment.head...)
+		}
+		if len(comment.foot) > 0 {
+			if len(parser.foot_comment) > 0 {
+				parser.foot_comment = append(parser.foot_comment, '\n')
+			}
+			parser.foot_comment = append(parser.foot_comment, comment.foot...)
+		}
+		if len(comment.line) > 0 {
+			if len(parser.line_comment) > 0 {
+				parser.line_comment = append(parser.line_comment, '\n')
+			}
+			parser.line_comment = append(parser.line_comment, comment.line...)
+		}
+		*comment = yaml_comment_t{}
+		parser.comments_head++
+	}
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+	parser.token_available = false
+	parser.tokens_parsed++
+	parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+	parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+	// Erase the event object.
+	*event = yaml_event_t{}
+
+	// No events after the end of the stream or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+		return true
+	}
+
+	// Generate the next event.
+	return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+	//trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+	switch parser.state {
+	case yaml_PARSE_STREAM_START_STATE:
+		return yaml_parser_parse_stream_start(parser, event)
+
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, true)
+
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, false)
+
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return yaml_parser_parse_document_content(parser, event)
+
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return yaml_parser_parse_document_end(parser, event)
+
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, true, false)
+
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return yaml_parser_parse_node(parser, event, true, true)
+
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, false, false)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_block_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+	default:
+		panic("invalid parser state")
+	}
+}
+
+// Parse the production:
+// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		var head_comment []byte
+		if len(parser.head_comment) > 0 {
+			// [Go] Scan the header comment backwards, and if an empty line is found, break
+			// the header so the part before the last empty line goes into the
+			// document header, while the bottom of it goes into a follow up event.
+			for i := len(parser.head_comment) - 1; i > 0; i-- {
+				if parser.head_comment[i] == '\n' {
+					if i == len(parser.head_comment)-1 {
+						head_comment = parser.head_comment[:i]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					} else if parser.head_comment[i-1] == '\n' {
+						head_comment = parser.head_comment[:i-1]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					}
+				}
+			}
+		}
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+
+			head_comment: head_comment,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                                                    ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+		token.typ == yaml_DOCUMENT_START_TOKEN ||
+		token.typ == yaml_DOCUMENT_END_TOKEN ||
+		token.typ == yaml_STREAM_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		return yaml_parser_process_empty_scalar(parser, event,
+			token.start_mark)
+	}
+	return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                                     *************
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	implicit := true
+	if token.typ == yaml_DOCUMENT_END_TOKEN {
+		end_mark = token.end_mark
+		skip_token(parser)
+		implicit = false
+	}
+
+	parser.tag_directives = parser.tag_directives[:0]
+
+	parser.state = yaml_PARSE_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_DOCUMENT_END_EVENT,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		implicit:   implicit,
+	}
+	yaml_parser_set_event_comments(parser, event)
+	if len(event.head_comment) > 0 && len(event.foot_comment) == 0 {
+		event.foot_comment = event.head_comment
+		event.head_comment = nil
+	}
+	return true
+}
+
+func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) {
+	event.head_comment = parser.head_comment
+	event.line_comment = parser.line_comment
+	event.foot_comment = parser.foot_comment
+	parser.head_comment = nil
+	parser.line_comment = nil
+	parser.foot_comment = nil
+	parser.tail_comment = nil
+	parser.stem_comment = nil
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          *****
+//                          | properties (block_content | indentless_block_sequence)?
+//                            **********  *
+//                          | block_content | indentless_block_sequence
+//                            *
+// block_node           ::= ALIAS
+//                          *****
+//                          | properties block_content?
+//                            ********** *
+//                          | block_content
+//                            *
+// flow_node            ::= ALIAS
+//                          *****
+//                          | properties flow_content?
+//                            ********** *
+//                          | flow_content
+//                            *
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+//                          *************************
+// block_content        ::= block_collection | flow_collection | SCALAR
+//                                                               ******
+// flow_content         ::= flow_collection | SCALAR
+//                                            ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_ALIAS_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		*event = yaml_event_t{
+			typ:        yaml_ALIAS_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+			anchor:     token.value,
+		}
+		yaml_parser_set_event_comments(parser, event)
+		skip_token(parser)
+		return true
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	var tag_token bool
+	var tag_handle, tag_suffix, anchor []byte
+	var tag_mark yaml_mark_t
+	if token.typ == yaml_ANCHOR_TOKEN {
+		anchor = token.value
+		start_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_TAG_TOKEN {
+			tag_token = true
+			tag_handle = token.value
+			tag_suffix = token.suffix
+			tag_mark = token.start_mark
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	} else if token.typ == yaml_TAG_TOKEN {
+		tag_token = true
+		tag_handle = token.value
+		tag_suffix = token.suffix
+		start_mark = token.start_mark
+		tag_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_ANCHOR_TOKEN {
+			anchor = token.value
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	var tag []byte
+	if tag_token {
+		if len(tag_handle) == 0 {
+			tag = tag_suffix
+			tag_suffix = nil
+		} else {
+			for i := range parser.tag_directives {
+				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+					tag = append(tag, tag_suffix...)
+					break
+				}
+			}
+			if len(tag) == 0 {
+				yaml_parser_set_parser_error_context(parser,
+					"while parsing a node", start_mark,
+					"found undefined tag handle", tag_mark)
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ implicit := len(tag) == 0
|
||
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
|
||
+ end_mark = token.end_mark
|
||
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_SEQUENCE_START_EVENT,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ anchor: anchor,
|
||
+ tag: tag,
|
||
+ implicit: implicit,
|
||
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
|
||
+ }
|
||
+ return true
|
||
+ }
|
||
+ if token.typ == yaml_SCALAR_TOKEN {
|
||
+ var plain_implicit, quoted_implicit bool
|
||
+ end_mark = token.end_mark
|
||
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
|
||
+ plain_implicit = true
|
||
+ } else if len(tag) == 0 {
|
||
+ quoted_implicit = true
|
||
+ }
|
||
+ parser.state = parser.states[len(parser.states)-1]
|
||
+ parser.states = parser.states[:len(parser.states)-1]
|
||
+
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_SCALAR_EVENT,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ anchor: anchor,
|
||
+ tag: tag,
|
||
+ value: token.value,
|
||
+ implicit: plain_implicit,
|
||
+ quoted_implicit: quoted_implicit,
|
||
+ style: yaml_style_t(token.style),
|
||
+ }
|
||
+ yaml_parser_set_event_comments(parser, event)
|
||
+ skip_token(parser)
|
||
+ return true
|
||
+ }
|
||
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
|
||
+ // [Go] Some of the events below can be merged as they differ only on style.
|
||
+ end_mark = token.end_mark
|
||
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_SEQUENCE_START_EVENT,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ anchor: anchor,
|
||
+ tag: tag,
|
||
+ implicit: implicit,
|
||
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
|
||
+ }
|
||
+ yaml_parser_set_event_comments(parser, event)
|
||
+ return true
|
||
+ }
|
||
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
|
||
+ end_mark = token.end_mark
|
||
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_MAPPING_START_EVENT,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ anchor: anchor,
|
||
+ tag: tag,
|
||
+ implicit: implicit,
|
||
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
|
||
+ }
|
||
+ yaml_parser_set_event_comments(parser, event)
|
||
+ return true
|
||
+ }
|
||
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
|
||
+ end_mark = token.end_mark
|
||
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_SEQUENCE_START_EVENT,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ anchor: anchor,
|
||
+ tag: tag,
|
||
+ implicit: implicit,
|
||
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
|
||
+ }
|
||
+ if parser.stem_comment != nil {
|
||
+ event.head_comment = parser.stem_comment
|
||
+ parser.stem_comment = nil
|
||
+ }
|
||
+ return true
|
||
+ }
|
||
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
|
||
+ end_mark = token.end_mark
|
||
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_MAPPING_START_EVENT,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ anchor: anchor,
|
||
+ tag: tag,
|
||
+ implicit: implicit,
|
||
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
|
||
+ }
|
||
+ if parser.stem_comment != nil {
|
||
+ event.head_comment = parser.stem_comment
|
||
+ parser.stem_comment = nil
|
||
+ }
|
||
+ return true
|
||
+ }
|
||
+ if len(anchor) > 0 || len(tag) > 0 {
|
||
+ parser.state = parser.states[len(parser.states)-1]
|
||
+ parser.states = parser.states[:len(parser.states)-1]
|
||
+
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_SCALAR_EVENT,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ anchor: anchor,
|
||
+ tag: tag,
|
||
+ implicit: implicit,
|
||
+ quoted_implicit: false,
|
||
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
|
||
+ }
|
||
+ return true
|
||
+ }
|
||
+
|
||
+ context := "while parsing a flow node"
|
||
+ if block {
|
||
+ context = "while parsing a block node"
|
||
+ }
|
||
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
|
||
+ "did not find expected node content", token.start_mark)
|
||
+ return false
|
||
+}
|
||
+
|
||
+// Parse the productions:
|
||
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
|
||
+// ******************** *********** * *********
|
||
+//
|
||
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
|
||
+ if first {
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ parser.marks = append(parser.marks, token.start_mark)
|
||
+ skip_token(parser)
|
||
+ }
|
||
+
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
|
||
+ mark := token.end_mark
|
||
+ prior_head_len := len(parser.head_comment)
|
||
+ skip_token(parser)
|
||
+ yaml_parser_split_stem_comment(parser, prior_head_len)
|
||
+ token = peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
|
||
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
|
||
+ return yaml_parser_parse_node(parser, event, true, false)
|
||
+ } else {
|
||
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
|
||
+ return yaml_parser_process_empty_scalar(parser, event, mark)
|
||
+ }
|
||
+ }
|
||
+ if token.typ == yaml_BLOCK_END_TOKEN {
|
||
+ parser.state = parser.states[len(parser.states)-1]
|
||
+ parser.states = parser.states[:len(parser.states)-1]
|
||
+ parser.marks = parser.marks[:len(parser.marks)-1]
|
||
+
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_SEQUENCE_END_EVENT,
|
||
+ start_mark: token.start_mark,
|
||
+ end_mark: token.end_mark,
|
||
+ }
|
||
+
|
||
+ skip_token(parser)
|
||
+ return true
|
||
+ }
|
||
+
|
||
+ context_mark := parser.marks[len(parser.marks)-1]
|
||
+ parser.marks = parser.marks[:len(parser.marks)-1]
|
||
+ return yaml_parser_set_parser_error_context(parser,
|
||
+ "while parsing a block collection", context_mark,
|
||
+ "did not find expected '-' indicator", token.start_mark)
|
||
+}
|
||
+
|
||
+// Parse the productions:
|
||
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
|
||
+// *********** *
|
||
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
|
||
+ mark := token.end_mark
|
||
+ prior_head_len := len(parser.head_comment)
|
||
+ skip_token(parser)
|
||
+ yaml_parser_split_stem_comment(parser, prior_head_len)
|
||
+ token = peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
|
||
+ token.typ != yaml_KEY_TOKEN &&
|
||
+ token.typ != yaml_VALUE_TOKEN &&
|
||
+ token.typ != yaml_BLOCK_END_TOKEN {
|
||
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
|
||
+ return yaml_parser_parse_node(parser, event, true, false)
|
||
+ }
|
||
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
|
||
+ return yaml_parser_process_empty_scalar(parser, event, mark)
|
||
+ }
|
||
+ parser.state = parser.states[len(parser.states)-1]
|
||
+ parser.states = parser.states[:len(parser.states)-1]
|
||
+
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_SEQUENCE_END_EVENT,
|
||
+ start_mark: token.start_mark,
|
||
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Split stem comment from head comment.
|
||
+//
|
||
+// When a sequence or map is found under a sequence entry, the former head comment
|
||
+// is assigned to the underlying sequence or map as a whole, not the individual
|
||
+// sequence or map entry as would be expected otherwise. To handle this case the
|
||
+// previous head comment is moved aside as the stem comment.
|
||
+func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
|
||
+ if stem_len == 0 {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ token := peek_token(parser)
|
||
+ if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ parser.stem_comment = parser.head_comment[:stem_len]
|
||
+ if len(parser.head_comment) == stem_len {
|
||
+ parser.head_comment = nil
|
||
+ } else {
|
||
+ // Copy suffix to prevent very strange bugs if someone ever appends
|
||
+ // further bytes to the prefix in the stem_comment slice above.
|
||
+ parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...)
|
||
+ }
|
||
+}
|
||
+
|
||
+// Parse the productions:
|
||
+// block_mapping ::= BLOCK-MAPPING_START
|
||
+// *******************
|
||
+// ((KEY block_node_or_indentless_sequence?)?
|
||
+// *** *
|
||
+// (VALUE block_node_or_indentless_sequence?)?)*
|
||
+//
|
||
+// BLOCK-END
|
||
+// *********
|
||
+//
|
||
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
|
||
+ if first {
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ parser.marks = append(parser.marks, token.start_mark)
|
||
+ skip_token(parser)
|
||
+ }
|
||
+
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // [Go] A tail comment was left from the prior mapping value processed. Emit an event
|
||
+ // as it needs to be processed with that value and not the following key.
|
||
+ if len(parser.tail_comment) > 0 {
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_TAIL_COMMENT_EVENT,
|
||
+ start_mark: token.start_mark,
|
||
+ end_mark: token.end_mark,
|
||
+ foot_comment: parser.tail_comment,
|
||
+ }
|
||
+ parser.tail_comment = nil
|
||
+ return true
|
||
+ }
|
||
+
|
||
+ if token.typ == yaml_KEY_TOKEN {
|
||
+ mark := token.end_mark
|
||
+ skip_token(parser)
|
||
+ token = peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ if token.typ != yaml_KEY_TOKEN &&
|
||
+ token.typ != yaml_VALUE_TOKEN &&
|
||
+ token.typ != yaml_BLOCK_END_TOKEN {
|
||
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
|
||
+ return yaml_parser_parse_node(parser, event, true, true)
|
||
+ } else {
|
||
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
|
||
+ return yaml_parser_process_empty_scalar(parser, event, mark)
|
||
+ }
|
||
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
|
||
+ parser.state = parser.states[len(parser.states)-1]
|
||
+ parser.states = parser.states[:len(parser.states)-1]
|
||
+ parser.marks = parser.marks[:len(parser.marks)-1]
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_MAPPING_END_EVENT,
|
||
+ start_mark: token.start_mark,
|
||
+ end_mark: token.end_mark,
|
||
+ }
|
||
+ yaml_parser_set_event_comments(parser, event)
|
||
+ skip_token(parser)
|
||
+ return true
|
||
+ }
|
||
+
|
||
+ context_mark := parser.marks[len(parser.marks)-1]
|
||
+ parser.marks = parser.marks[:len(parser.marks)-1]
|
||
+ return yaml_parser_set_parser_error_context(parser,
|
||
+ "while parsing a block mapping", context_mark,
|
||
+ "did not find expected key", token.start_mark)
|
||
+}
|
||
+
|
||
+// Parse the productions:
|
||
+// block_mapping ::= BLOCK-MAPPING_START
|
||
+//
|
||
+// ((KEY block_node_or_indentless_sequence?)?
|
||
+//
|
||
+// (VALUE block_node_or_indentless_sequence?)?)*
|
||
+// ***** *
|
||
+// BLOCK-END
|
||
+//
|
||
+//
|
||
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ if token.typ == yaml_VALUE_TOKEN {
|
||
+ mark := token.end_mark
|
||
+ skip_token(parser)
|
||
+ token = peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ if token.typ != yaml_KEY_TOKEN &&
|
||
+ token.typ != yaml_VALUE_TOKEN &&
|
||
+ token.typ != yaml_BLOCK_END_TOKEN {
|
||
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
|
||
+ return yaml_parser_parse_node(parser, event, true, true)
|
||
+ }
|
||
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
|
||
+ return yaml_parser_process_empty_scalar(parser, event, mark)
|
||
+ }
|
||
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
|
||
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
|
||
+}
|
||
+
|
||
+// Parse the productions:
|
||
+// flow_sequence ::= FLOW-SEQUENCE-START
|
||
+// *******************
|
||
+// (flow_sequence_entry FLOW-ENTRY)*
|
||
+// * **********
|
||
+// flow_sequence_entry?
|
||
+// *
|
||
+// FLOW-SEQUENCE-END
|
||
+// *****************
|
||
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||
+// *
|
||
+//
|
||
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
|
||
+ if first {
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ parser.marks = append(parser.marks, token.start_mark)
|
||
+ skip_token(parser)
|
||
+ }
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
|
||
+ if !first {
|
||
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
|
||
+ skip_token(parser)
|
||
+ token = peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ } else {
|
||
+ context_mark := parser.marks[len(parser.marks)-1]
|
||
+ parser.marks = parser.marks[:len(parser.marks)-1]
|
||
+ return yaml_parser_set_parser_error_context(parser,
|
||
+ "while parsing a flow sequence", context_mark,
|
||
+ "did not find expected ',' or ']'", token.start_mark)
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if token.typ == yaml_KEY_TOKEN {
|
||
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_MAPPING_START_EVENT,
|
||
+ start_mark: token.start_mark,
|
||
+ end_mark: token.end_mark,
|
||
+ implicit: true,
|
||
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
|
||
+ }
|
||
+ skip_token(parser)
|
||
+ return true
|
||
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
|
||
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
|
||
+ return yaml_parser_parse_node(parser, event, false, false)
|
||
+ }
|
||
+ }
|
||
+
|
||
+ parser.state = parser.states[len(parser.states)-1]
|
||
+ parser.states = parser.states[:len(parser.states)-1]
|
||
+ parser.marks = parser.marks[:len(parser.marks)-1]
|
||
+
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_SEQUENCE_END_EVENT,
|
||
+ start_mark: token.start_mark,
|
||
+ end_mark: token.end_mark,
|
||
+ }
|
||
+ yaml_parser_set_event_comments(parser, event)
|
||
+
|
||
+ skip_token(parser)
|
||
+ return true
|
||
+}
|
||
+
|
||
+//
|
||
+// Parse the productions:
|
||
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||
+// *** *
|
||
+//
|
||
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ if token.typ != yaml_VALUE_TOKEN &&
|
||
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
|
||
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
|
||
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
|
||
+ return yaml_parser_parse_node(parser, event, false, false)
|
||
+ }
|
||
+ mark := token.end_mark
|
||
+ skip_token(parser)
|
||
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
|
||
+ return yaml_parser_process_empty_scalar(parser, event, mark)
|
||
+}
|
||
+
|
||
+// Parse the productions:
|
||
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||
+// ***** *
|
||
+//
|
||
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ if token.typ == yaml_VALUE_TOKEN {
|
||
+ skip_token(parser)
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
|
||
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
|
||
+ return yaml_parser_parse_node(parser, event, false, false)
|
||
+ }
|
||
+ }
|
||
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
|
||
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
|
||
+}
|
||
+
|
||
+// Parse the productions:
|
||
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||
+// *
|
||
+//
|
||
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_MAPPING_END_EVENT,
|
||
+ start_mark: token.start_mark,
|
||
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Parse the productions:
|
||
+// flow_mapping ::= FLOW-MAPPING-START
|
||
+// ******************
|
||
+// (flow_mapping_entry FLOW-ENTRY)*
|
||
+// * **********
|
||
+// flow_mapping_entry?
|
||
+// ******************
|
||
+// FLOW-MAPPING-END
|
||
+// ****************
|
||
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||
+// * *** *
|
||
+//
|
||
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
|
||
+ if first {
|
||
+ token := peek_token(parser)
|
||
+ parser.marks = append(parser.marks, token.start_mark)
|
||
+ skip_token(parser)
|
||
+ }
|
||
+
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
|
||
+ if !first {
|
||
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
|
||
+ skip_token(parser)
|
||
+ token = peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ } else {
|
||
+ context_mark := parser.marks[len(parser.marks)-1]
|
||
+ parser.marks = parser.marks[:len(parser.marks)-1]
|
||
+ return yaml_parser_set_parser_error_context(parser,
|
||
+ "while parsing a flow mapping", context_mark,
|
||
+ "did not find expected ',' or '}'", token.start_mark)
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if token.typ == yaml_KEY_TOKEN {
|
||
+ skip_token(parser)
|
||
+ token = peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ if token.typ != yaml_VALUE_TOKEN &&
|
||
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
|
||
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
|
||
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
|
||
+ return yaml_parser_parse_node(parser, event, false, false)
|
||
+ } else {
|
||
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
|
||
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
|
||
+ }
|
||
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
|
||
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
|
||
+ return yaml_parser_parse_node(parser, event, false, false)
|
||
+ }
|
||
+ }
|
||
+
|
||
+ parser.state = parser.states[len(parser.states)-1]
|
||
+ parser.states = parser.states[:len(parser.states)-1]
|
||
+ parser.marks = parser.marks[:len(parser.marks)-1]
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_MAPPING_END_EVENT,
|
||
+ start_mark: token.start_mark,
|
||
+ end_mark: token.end_mark,
|
||
+ }
|
||
+ yaml_parser_set_event_comments(parser, event)
|
||
+ skip_token(parser)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Parse the productions:
|
||
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
|
||
+// * ***** *
|
||
+//
|
||
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ if empty {
|
||
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
|
||
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
|
||
+ }
|
||
+ if token.typ == yaml_VALUE_TOKEN {
|
||
+ skip_token(parser)
|
||
+ token = peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
|
||
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
|
||
+ return yaml_parser_parse_node(parser, event, false, false)
|
||
+ }
|
||
+ }
|
||
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
|
||
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
|
||
+}
|
||
+
|
||
+// Generate an empty scalar event.
|
||
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
|
||
+ *event = yaml_event_t{
|
||
+ typ: yaml_SCALAR_EVENT,
|
||
+ start_mark: mark,
|
||
+ end_mark: mark,
|
||
+ value: nil, // Empty
|
||
+ implicit: true,
|
||
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+var default_tag_directives = []yaml_tag_directive_t{
|
||
+ {[]byte("!"), []byte("!")},
|
||
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
|
||
+}
|
||
+
|
||
+// Parse directives.
|
||
+func yaml_parser_process_directives(parser *yaml_parser_t,
|
||
+ version_directive_ref **yaml_version_directive_t,
|
||
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
|
||
+
|
||
+ var version_directive *yaml_version_directive_t
|
||
+ var tag_directives []yaml_tag_directive_t
|
||
+
|
||
+ token := peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
|
||
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
|
||
+ if version_directive != nil {
|
||
+ yaml_parser_set_parser_error(parser,
|
||
+ "found duplicate %YAML directive", token.start_mark)
|
||
+ return false
|
||
+ }
|
||
+ if token.major != 1 || token.minor != 1 {
|
||
+ yaml_parser_set_parser_error(parser,
|
||
+ "found incompatible YAML document", token.start_mark)
|
||
+ return false
|
||
+ }
|
||
+ version_directive = &yaml_version_directive_t{
|
||
+ major: token.major,
|
||
+ minor: token.minor,
|
||
+ }
|
||
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
|
||
+ value := yaml_tag_directive_t{
|
||
+ handle: token.value,
|
||
+ prefix: token.prefix,
|
||
+ }
|
||
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
|
||
+ return false
|
||
+ }
|
||
+ tag_directives = append(tag_directives, value)
|
||
+ }
|
||
+
|
||
+ skip_token(parser)
|
||
+ token = peek_token(parser)
|
||
+ if token == nil {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ for i := range default_tag_directives {
|
||
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if version_directive_ref != nil {
|
||
+ *version_directive_ref = version_directive
|
||
+ }
|
||
+ if tag_directives_ref != nil {
|
||
+ *tag_directives_ref = tag_directives
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Append a tag directive to the directives stack.
|
||
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
|
||
+ for i := range parser.tag_directives {
|
||
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
|
||
+ if allow_duplicates {
|
||
+ return true
|
||
+ }
|
||
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // [Go] I suspect the copy is unnecessary. This was likely done
|
||
+ // because there was no way to track ownership of the data.
|
||
+ value_copy := yaml_tag_directive_t{
|
||
+ handle: make([]byte, len(value.handle)),
|
||
+ prefix: make([]byte, len(value.prefix)),
|
||
+ }
|
||
+ copy(value_copy.handle, value.handle)
|
||
+ copy(value_copy.prefix, value.prefix)
|
||
+ parser.tag_directives = append(parser.tag_directives, value_copy)
|
||
+ return true
|
||
+}
|
||
diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go
new file mode 100644
index 000000000000..b7de0a89c462
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/readerc.go
@@ -0,0 +1,434 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return 0.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
+const (
+	bom_UTF8 = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we had enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less that the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// [Go] This function was changed to guarantee the requested length size at EOF.
+	// The fact we need to do this is pretty awful, but the description above implies
+	// for that to be the case, and there are tests
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to make sure the buffer has a
+		// given length is Go) panicking; or C) accessing invalid memory.
+		//return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character. Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				// Char. number range | UTF-8 octet sequence
+				// (hexadecimal) | (binary)
+				// --------------------+------------------------------------
+				// 0000 0000-0000 007F | 0xxxxxxx
+				// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
+				if width > raw_unread {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-8 octet sequence",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Decode the leading octet.
+				switch {
+				case octet&0x80 == 0x00:
+					value = rune(octet & 0x7F)
+				case octet&0xE0 == 0xC0:
+					value = rune(octet & 0x1F)
+				case octet&0xF0 == 0xE0:
+					value = rune(octet & 0x0F)
+				case octet&0xF8 == 0xF0:
+					value = rune(octet & 0x07)
+				default:
+					value = 0
+				}
+
+				// Check and decode the trailing octets.
+				for k := 1; k < width; k++ {
+					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+					// Check if the octet is valid.
+					if (octet & 0xC0) != 0x80 {
+						return yaml_parser_set_reader_error(parser,
+							"invalid trailing UTF-8 octet",
+							parser.offset+k, int(octet))
+					}
+
+					// Decode the octet.
+					value = (value << 6) + rune(octet&0x3F)
+				}
+
+				// Check the length of the sequence against the value.
+				switch {
+				case width == 1:
+				case width == 2 && value >= 0x80:
+				case width == 3 && value >= 0x800:
+				case width == 4 && value >= 0x10000:
+				default:
+					return yaml_parser_set_reader_error(parser,
+						"invalid length of a UTF-8 sequence",
+						parser.offset, -1)
+				}
+
+				// Check the range of the value.
+				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+					return yaml_parser_set_reader_error(parser,
+						"invalid Unicode character",
+						parser.offset, int(value))
+				}
+
+			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+				var low, high int
+				if parser.encoding == yaml_UTF16LE_ENCODING {
+					low, high = 0, 1
+				} else {
+					low, high = 1, 0
+				}
+
+				// The UTF-16 encoding is not as simple as one might
+				// naively think. Check RFC 2781
+				// (http://www.ietf.org/rfc/rfc2781.txt).
+				//
+				// Normally, two subsequent bytes describe a Unicode
+				// character. However a special technique (called a
+				// surrogate pair) is used for specifying character
+				// values larger than 0xFFFF.
+				//
+				// A surrogate pair consists of two pseudo-characters:
+				// high surrogate area (0xD800-0xDBFF)
+				// low surrogate area (0xDC00-0xDFFF)
+				//
+				// The following formulas are used for decoding
+				// and encoding characters using surrogate pairs:
+				//
+				// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+				// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+				// W1 = 110110yyyyyyyyyy
+				// W2 = 110111xxxxxxxxxx
+				//
+				// where U is the character value, W1 is the high surrogate
+				// area, W2 is the low surrogate area.
+
+				// Check for incomplete UTF-16 character.
+				if raw_unread < 2 {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-16 character",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Get the character.
+				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+				// Check for unexpected low surrogate area.
+				if value&0xFC00 == 0xDC00 {
+					return yaml_parser_set_reader_error(parser,
+						"unexpected low surrogate area",
+						parser.offset, int(value))
+				}
+
+				// Check for a high surrogate area.
+				if value&0xFC00 == 0xD800 {
+					width = 4
+
+					// Check for incomplete surrogate pair.
+					if raw_unread < 4 {
+						if parser.eof {
+							return yaml_parser_set_reader_error(parser,
+								"incomplete UTF-16 surrogate pair",
+								parser.offset, -1)
+						}
+						break inner
+					}
+
+					// Get the next character.
+					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+					// Check for a low surrogate area.
+					if value2&0xFC00 != 0xDC00 {
+						return yaml_parser_set_reader_error(parser,
+							"expected low surrogate area",
+							parser.offset+2, int(value2))
+					}
+
+					// Generate the value of the surrogate pair.
+					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+				} else {
+					width = 2
+				}
+
+			default:
+				panic("impossible")
+			}
+
+			// Check if the character is in the allowed range:
+			// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+			// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+			// | [#x10000-#x10FFFF] (32 bit)
+			switch {
+			case value == 0x09:
+			case value == 0x0A:
+			case value == 0x0D:
+			case value >= 0x20 && value <= 0x7E:
+			case value == 0x85:
+			case value >= 0xA0 && value <= 0xD7FF:
+			case value >= 0xE000 && value <= 0xFFFD:
+			case value >= 0x10000 && value <= 0x10FFFF:
+			default:
+				return yaml_parser_set_reader_error(parser,
+					"control characters are not allowed",
+					parser.offset, int(value))
+			}
+
+			// Move the raw pointers.
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer.
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
+			}
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every single check that calls this function to make sure the buffer
+	// has a given length is Go) panicking; or C) accessing invalid memory.
+	// This happens here due to the EOF above breaking early.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go
new file mode 100644
index 000000000000..64ae888057a5
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/resolve.go
@@ -0,0 +1,326 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+	"encoding/base64"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type resolveMapItem struct {
+	value interface{}
+	tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v interface{}
+		tag string
+		l []string
+	}{
+		{true, boolTag, []string{"true", "True", "TRUE"}},
+		{false, boolTag, []string{"false", "False", "FALSE"}},
+		{nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", mergeTag, []string{"<<"}},
+	}
+
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
+
+const (
+	nullTag = "!!null"
+	boolTag = "!!bool"
+	strTag = "!!str"
+	intTag = "!!int"
+	floatTag = "!!float"
+	timestampTag = "!!timestamp"
+	seqTag = "!!seq"
+	mapTag = "!!map"
+	binaryTag = "!!binary"
+	mergeTag = "!!merge"
+)
+
+var longTags = make(map[string]string)
+var shortTags = make(map[string]string)
+
+func init() {
+	for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
+		ltag := longTag(stag)
+		longTags[stag] = ltag
+		shortTags[ltag] = stag
+	}
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+	if strings.HasPrefix(tag, longTagPrefix) {
+		if stag, ok := shortTags[tag]; ok {
+			return stag
+		}
+		return "!!" + tag[len(longTagPrefix):]
+	}
+	return tag
+}
+
+func longTag(tag string) string {
+	if strings.HasPrefix(tag, "!!") {
+		if ltag, ok := longTags[tag]; ok {
+			return ltag
+		}
+		return longTagPrefix + tag[2:]
+	}
+	return tag
+}
+
+func resolvableTag(tag string) bool {
+	switch tag {
+	case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
+		return true
+	}
+	return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+	tag = shortTag(tag)
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		switch tag {
+		case "", rtag, strTag, binaryTag:
+			return
+		case floatTag:
+			if rtag == intTag {
+				switch v := out.(type) {
+				case int64:
+					rtag = floatTag
+					out = float64(v)
+					return
+				case int:
+					rtag = floatTag
+					out = float64(v)
+					return
+				}
+			}
+		}
+		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != strTag && tag != binaryTag {
+		// Handle things we can lookup in a map.
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parser, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return floatTag, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or there's an explicit
+			// !!timestamp tag.
+			if tag == "" || tag == timestampTag {
+				t, ok := parseTimestamp(in)
+				if ok {
+					return timestampTag, t
+				}
+			}
+
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return intTag, int(intv)
+				} else {
+					return intTag, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return intTag, uintv
+			}
+			if yamlStyleFloat.MatchString(plain) {
+				floatv, err := strconv.ParseFloat(plain, 64)
+				if err == nil {
+					return floatTag, floatv
+				}
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return intTag, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+				if err == nil {
+					if true || intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+			}
+			// Octals as introduced in version 1.2 of the spec.
+			// Octals from the 1.1 spec, spelled as 0777, are still
+			// decoded by default in v3 as well for compatibility.
+			// May be dropped in v4 depending on how usage evolves.
+			if strings.HasPrefix(plain, "0o") {
+				intv, err := strconv.ParseInt(plain[2:], 8, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+				if err == nil {
+					return intTag, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0o") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+				if err == nil {
+					if true || intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+			}
+		default:
+			panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	return strTag, in
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999", // space separated with no time zone
+	"2006-1-2", // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go
|
||
new file mode 100644
|
||
index 000000000000..ca0070108f4e
|
||
--- /dev/null
|
||
+++ b/vendor/gopkg.in/yaml.v3/scannerc.go
|
||
@@ -0,0 +1,3038 @@
|
||
+//
|
||
+// Copyright (c) 2011-2019 Canonical Ltd
|
||
+// Copyright (c) 2006-2010 Kirill Simonov
|
||
+//
|
||
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||
+// this software and associated documentation files (the "Software"), to deal in
|
||
+// the Software without restriction, including without limitation the rights to
|
||
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||
+// of the Software, and to permit persons to whom the Software is furnished to do
|
||
+// so, subject to the following conditions:
|
||
+//
|
||
+// The above copyright notice and this permission notice shall be included in all
|
||
+// copies or substantial portions of the Software.
|
||
+//
|
||
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||
+// SOFTWARE.
|
||
+
|
||
+package yaml
|
||
+
|
||
+import (
|
||
+ "bytes"
|
||
+ "fmt"
|
||
+)
|
||
+
|
||
+// Introduction
|
||
+// ************
|
||
+//
|
||
+// The following notes assume that you are familiar with the YAML specification
|
||
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
|
||
+// some cases we are less restrictive that it requires.
|
||
+//
|
||
+// The process of transforming a YAML stream into a sequence of events is
|
||
+// divided on two steps: Scanning and Parsing.
|
||
+//
|
||
+// The Scanner transforms the input stream into a sequence of tokens, while the
|
||
+// parser transform the sequence of tokens produced by the Scanner into a
|
||
+// sequence of parsing events.
|
||
+//
|
||
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
|
||
+// is a straightforward implementation of a recursive-descent parser (or,
|
||
+// LL(1) parser, as it is usually called).
|
||
+//
|
||
+// Actually there are two issues of Scanning that might be called "clever", the
|
||
+// rest is quite straightforward. The issues are "block collection start" and
|
||
+// "simple keys". Both issues are explained below in details.
|
||
+//
|
||
+// Here the Scanning step is explained and implemented. We start with the list
|
||
+// of all the tokens produced by the Scanner together with short descriptions.
|
||
+//
|
||
+// Now, tokens:
|
||
+//
|
||
+// STREAM-START(encoding) # The stream start.
|
||
+// STREAM-END # The stream end.
|
||
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
|
||
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
|
||
+// DOCUMENT-START # '---'
|
||
+// DOCUMENT-END # '...'
|
||
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
|
||
+// BLOCK-MAPPING-START # sequence or a block mapping.
|
||
+// BLOCK-END # Indentation decrease.
|
||
+// FLOW-SEQUENCE-START # '['
|
||
+// FLOW-SEQUENCE-END # ']'
|
||
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
|
||
+// BLOCK-ENTRY # '-'
|
||
+// FLOW-ENTRY # ','
|
||
+// KEY # '?' or nothing (simple keys).
|
||
+// VALUE # ':'
|
||
+// ALIAS(anchor) # '*anchor'
|
||
+// ANCHOR(anchor) # '&anchor'
|
||
+// TAG(handle,suffix) # '!handle!suffix'
|
||
+// SCALAR(value,style) # A scalar.
|
||
+//
|
||
+// The following two tokens are "virtual" tokens denoting the beginning and the
|
||
+// end of the stream:
|
||
+//
|
||
+// STREAM-START(encoding)
|
||
+// STREAM-END
|
||
+//
|
||
+// We pass the information about the input stream encoding with the
|
||
+// STREAM-START token.
|
||
+//
|
||
+// The next two tokens are responsible for tags:
|
||
+//
|
||
+// VERSION-DIRECTIVE(major,minor)
|
||
+// TAG-DIRECTIVE(handle,prefix)
|
||
+//
|
||
+// Example:
|
||
+//
|
||
+// %YAML 1.1
|
||
+// %TAG ! !foo
|
||
+// %TAG !yaml! tag:yaml.org,2002:
|
||
+// ---
|
||
+//
|
||
+// The corresponding sequence of tokens:
|
||
+//
|
||
+// STREAM-START(utf-8)
|
||
+// VERSION-DIRECTIVE(1,1)
|
||
+// TAG-DIRECTIVE("!","!foo")
|
||
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
|
||
+// DOCUMENT-START
|
||
+// STREAM-END
|
||
+//
|
||
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
|
||
+// line.
|
||
+//
|
||
+// The document start and end indicators are represented by:
|
||
+//
|
||
+// DOCUMENT-START
|
||
+// DOCUMENT-END
|
||
+//
|
||
+// Note that if a YAML stream contains an implicit document (without '---'
|
||
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
|
||
+// produced.
|
||
+//
|
||
+// In the following examples, we present whole documents together with the
|
||
+// produced tokens.
|
||
+//
|
||
+// 1. An implicit document:
|
||
+//
|
||
+// 'a scalar'
|
||
+//
|
||
+// Tokens:
|
||
+//
|
||
+// STREAM-START(utf-8)
|
||
+// SCALAR("a scalar",single-quoted)
|
||
+// STREAM-END
|
||
+//
|
||
+// 2. An explicit document:
|
||
+//
|
||
+// ---
|
||
+// 'a scalar'
|
||
+// ...
|
||
+//
|
||
+// Tokens:
|
||
+//
|
||
+// STREAM-START(utf-8)
|
||
+// DOCUMENT-START
|
||
+// SCALAR("a scalar",single-quoted)
|
||
+// DOCUMENT-END
|
||
+// STREAM-END
|
||
+//
|
||
+// 3. Several documents in a stream:
|
||
+//
|
||
+// 'a scalar'
|
||
+// ---
|
||
+// 'another scalar'
|
||
+// ---
|
||
+// 'yet another scalar'
|
||
+//
|
||
+// Tokens:
|
||
+//
|
||
+// STREAM-START(utf-8)
|
||
+// SCALAR("a scalar",single-quoted)
|
||
+// DOCUMENT-START
|
||
+// SCALAR("another scalar",single-quoted)
|
||
+// DOCUMENT-START
|
||
+// SCALAR("yet another scalar",single-quoted)
|
||
+// STREAM-END
|
||
+//
|
||
+// We have already introduced the SCALAR token above. The following tokens are
|
||
+// used to describe aliases, anchors, tags, and scalars:
|
||
+//
|
||
+// ALIAS(anchor)
|
||
+// ANCHOR(anchor)
|
||
+// TAG(handle,suffix)
|
||
+// SCALAR(value,style)
|
||
+//
|
||
+// The following series of examples illustrate the usage of these tokens:
|
||
+//
|
||
+// 1. A recursive sequence:
|
||
+//
|
||
+// &A [ *A ]
|
||
+//
|
||
+// Tokens:
|
||
+//
|
||
+// STREAM-START(utf-8)
|
||
+// ANCHOR("A")
|
||
+// FLOW-SEQUENCE-START
|
||
+// ALIAS("A")
|
||
+// FLOW-SEQUENCE-END
|
||
+// STREAM-END
|
||
+//
|
||
+// 2. A tagged scalar:
|
||
+//
|
||
+// !!float "3.14" # A good approximation.
|
||
+//
|
||
+// Tokens:
|
||
+//
|
||
+// STREAM-START(utf-8)
|
||
+// TAG("!!","float")
|
||
+// SCALAR("3.14",double-quoted)
|
||
+// STREAM-END
|
||
+//
|
||
+// 3. Various scalar styles:
|
||
+//
|
||
+// --- # Implicit empty plain scalars do not produce tokens.
|
||
+// --- a plain scalar
|
||
+// --- 'a single-quoted scalar'
|
||
+// --- "a double-quoted scalar"
|
||
+// --- |-
|
||
+// a literal scalar
|
||
+// --- >-
|
||
+// a folded
|
||
+// scalar
|
||
+//
|
||
+// Tokens:
|
||
+//
|
||
+// STREAM-START(utf-8)
|
||
+// DOCUMENT-START
|
||
+// DOCUMENT-START
|
||
+// SCALAR("a plain scalar",plain)
|
||
+// DOCUMENT-START
|
||
+// SCALAR("a single-quoted scalar",single-quoted)
|
||
+// DOCUMENT-START
|
||
+// SCALAR("a double-quoted scalar",double-quoted)
|
||
+// DOCUMENT-START
|
||
+// SCALAR("a literal scalar",literal)
|
||
+// DOCUMENT-START
|
||
+// SCALAR("a folded scalar",folded)
|
||
+// STREAM-END
|
||
+//
|
||
+// Now it's time to review collection-related tokens. We will start with
|
||
+// flow collections:
|
||
+//
|
||
+// FLOW-SEQUENCE-START
|
||
+// FLOW-SEQUENCE-END
|
||
+// FLOW-MAPPING-START
|
||
+// FLOW-MAPPING-END
|
||
+// FLOW-ENTRY
|
||
+// KEY
|
||
+// VALUE
|
||
+//
|
||
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
|
||
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
|
||
+// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally, the
|
||
+// indicators '?' and ':', which are used for denoting mapping keys and values,
|
||
+// are represented by the KEY and VALUE tokens.
|
||
+//
|
||
+// The following examples show flow collections:
|
||
+//
|
||
+// 1. A flow sequence:
|
||
+//
|
||
+// [item 1, item 2, item 3]
|
||
+//
|
||
+// Tokens:
|
||
+//
|
||
+// STREAM-START(utf-8)
|
||
+// FLOW-SEQUENCE-START
|
||
+// SCALAR("item 1",plain)
|
||
+// FLOW-ENTRY
|
||
+// SCALAR("item 2",plain)
|
||
+// FLOW-ENTRY
|
||
+// SCALAR("item 3",plain)
|
||
+// FLOW-SEQUENCE-END
|
||
+// STREAM-END
|
||
+//
|
||
+// 2. A flow mapping:
|
||
+//
|
||
+// {
|
||
+// a simple key: a value, # Note that the KEY token is produced.
|
||
+// ? a complex key: another value,
|
||
+// }
|
||
+//
|
||
+// Tokens:
|
||
+//
|
||
+// STREAM-START(utf-8)
|
||
+// FLOW-MAPPING-START
|
||
+// KEY
|
||
+// SCALAR("a simple key",plain)
|
||
+// VALUE
|
||
+// SCALAR("a value",plain)
|
||
+// FLOW-ENTRY
|
||
+// KEY
|
||
+// SCALAR("a complex key",plain)
|
||
+// VALUE
|
||
+// SCALAR("another value",plain)
|
||
+// FLOW-ENTRY
|
||
+// FLOW-MAPPING-END
|
||
+// STREAM-END
|
||
+//
|
||
+// A simple key is a key which is not denoted by the '?' indicator. Note that
|
||
+// the Scanner still produces the KEY token whenever it encounters a simple key.
|
||
+//
|
||
+// For scanning block collections, the following tokens are used (note that we
|
||
+// repeat KEY and VALUE here):
|
||
+//
|
||
+// BLOCK-SEQUENCE-START
|
||
+// BLOCK-MAPPING-START
|
||
+// BLOCK-END
|
||
+// BLOCK-ENTRY
|
||
+// KEY
|
||
+// VALUE
|
||
+//
|
||
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote an indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes an indentation decrease that ends a block
+// collection (cf. the DEDENT token in Python). However, YAML has some syntax
+// peculiarities that make detection of these tokens more complex.
|
||
+//
|
||
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
|
||
+// '-', '?', and ':' correspondingly.
|
||
+//
|
||
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
|
||
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
|
||
+//
|
||
+// 1. Block sequences:
|
||
+//
|
||
+// - item 1
|
||
+// - item 2
|
||
+// -
|
||
+// - item 3.1
|
||
+// - item 3.2
|
||
+// -
|
||
+// key 1: value 1
|
||
+// key 2: value 2
|
||
+//
|
||
+// Tokens:
|
||
+//
|
||
+// STREAM-START(utf-8)
|
||
+// BLOCK-SEQUENCE-START
|
||
+// BLOCK-ENTRY
|
||
+// SCALAR("item 1",plain)
|
||
+// BLOCK-ENTRY
|
||
+// SCALAR("item 2",plain)
|
||
+// BLOCK-ENTRY
|
||
+// BLOCK-SEQUENCE-START
|
||
+// BLOCK-ENTRY
|
||
+// SCALAR("item 3.1",plain)
|
||
+// BLOCK-ENTRY
|
||
+// SCALAR("item 3.2",plain)
|
||
+// BLOCK-END
|
||
+// BLOCK-ENTRY
|
||
+// BLOCK-MAPPING-START
|
||
+// KEY
|
||
+// SCALAR("key 1",plain)
|
||
+// VALUE
|
||
+// SCALAR("value 1",plain)
|
||
+// KEY
|
||
+// SCALAR("key 2",plain)
|
||
+// VALUE
|
||
+// SCALAR("value 2",plain)
|
||
+// BLOCK-END
|
||
+// BLOCK-END
|
||
+// STREAM-END
|
||
+//
|
||
+// 2. Block mappings:
|
||
+//
|
||
+// a simple key: a value # The KEY token is produced here.
|
||
+// ? a complex key
|
||
+// : another value
|
||
+// a mapping:
|
||
+// key 1: value 1
|
||
+// key 2: value 2
|
||
+// a sequence:
|
||
+// - item 1
|
||
+// - item 2
|
||
+//
|
||
+// Tokens:
|
||
+//
|
||
+// STREAM-START(utf-8)
|
||
+// BLOCK-MAPPING-START
|
||
+// KEY
|
||
+// SCALAR("a simple key",plain)
|
||
+// VALUE
|
||
+// SCALAR("a value",plain)
|
||
+// KEY
|
||
+// SCALAR("a complex key",plain)
|
||
+// VALUE
|
||
+// SCALAR("another value",plain)
|
||
+// KEY
|
||
+// SCALAR("a mapping",plain)
|
||
+// BLOCK-MAPPING-START
|
||
+// KEY
|
||
+// SCALAR("key 1",plain)
|
||
+// VALUE
|
||
+// SCALAR("value 1",plain)
|
||
+// KEY
|
||
+// SCALAR("key 2",plain)
|
||
+// VALUE
|
||
+// SCALAR("value 2",plain)
|
||
+// BLOCK-END
|
||
+// KEY
|
||
+// SCALAR("a sequence",plain)
|
||
+// VALUE
|
||
+// BLOCK-SEQUENCE-START
|
||
+// BLOCK-ENTRY
|
||
+// SCALAR("item 1",plain)
|
||
+// BLOCK-ENTRY
|
||
+// SCALAR("item 2",plain)
|
||
+// BLOCK-END
|
||
+// BLOCK-END
|
||
+// STREAM-END
|
||
+//
|
||
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
|
||
+// block collection may start at the current line. The following examples
|
||
+// illustrate this case:
|
||
+//
|
||
+// 1. Collections in a sequence:
|
||
+//
|
||
+// - - item 1
|
||
+// - item 2
|
||
+// - key 1: value 1
|
||
+// key 2: value 2
|
||
+// - ? complex key
|
||
+// : complex value
|
||
+//
|
||
+// Tokens:
|
||
+//
|
||
+// STREAM-START(utf-8)
|
||
+// BLOCK-SEQUENCE-START
|
||
+// BLOCK-ENTRY
|
||
+// BLOCK-SEQUENCE-START
|
||
+// BLOCK-ENTRY
|
||
+// SCALAR("item 1",plain)
|
||
+// BLOCK-ENTRY
|
||
+// SCALAR("item 2",plain)
|
||
+// BLOCK-END
|
||
+// BLOCK-ENTRY
|
||
+// BLOCK-MAPPING-START
|
||
+// KEY
|
||
+// SCALAR("key 1",plain)
|
||
+// VALUE
|
||
+// SCALAR("value 1",plain)
|
||
+// KEY
|
||
+// SCALAR("key 2",plain)
|
||
+// VALUE
|
||
+// SCALAR("value 2",plain)
|
||
+// BLOCK-END
|
||
+// BLOCK-ENTRY
|
||
+// BLOCK-MAPPING-START
|
||
+// KEY
|
||
+// SCALAR("complex key")
|
||
+// VALUE
|
||
+// SCALAR("complex value")
|
||
+// BLOCK-END
|
||
+// BLOCK-END
|
||
+// STREAM-END
|
||
+//
|
||
+// 2. Collections in a mapping:
|
||
+//
|
||
+// ? a sequence
|
||
+// : - item 1
|
||
+// - item 2
|
||
+// ? a mapping
|
||
+// : key 1: value 1
|
||
+// key 2: value 2
|
||
+//
|
||
+// Tokens:
|
||
+//
|
||
+// STREAM-START(utf-8)
|
||
+// BLOCK-MAPPING-START
|
||
+// KEY
|
||
+// SCALAR("a sequence",plain)
|
||
+// VALUE
|
||
+// BLOCK-SEQUENCE-START
|
||
+// BLOCK-ENTRY
|
||
+// SCALAR("item 1",plain)
|
||
+// BLOCK-ENTRY
|
||
+// SCALAR("item 2",plain)
|
||
+// BLOCK-END
|
||
+// KEY
|
||
+// SCALAR("a mapping",plain)
|
||
+// VALUE
|
||
+// BLOCK-MAPPING-START
|
||
+// KEY
|
||
+// SCALAR("key 1",plain)
|
||
+// VALUE
|
||
+// SCALAR("value 1",plain)
|
||
+// KEY
|
||
+// SCALAR("key 2",plain)
|
||
+// VALUE
|
||
+// SCALAR("value 2",plain)
|
||
+// BLOCK-END
|
||
+// BLOCK-END
|
||
+// STREAM-END
|
||
+//
|
||
+// YAML also permits non-indented sequences if they are included in a block
|
||
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
|
||
+//
|
||
+// key:
|
||
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
|
||
+// - item 2
|
||
+//
|
||
+// Tokens:
|
||
+//
|
||
+// STREAM-START(utf-8)
|
||
+// BLOCK-MAPPING-START
|
||
+// KEY
|
||
+// SCALAR("key",plain)
|
||
+// VALUE
|
||
+// BLOCK-ENTRY
|
||
+// SCALAR("item 1",plain)
|
||
+// BLOCK-ENTRY
|
||
+// SCALAR("item 2",plain)
|
||
+// BLOCK-END
|
||
+//
|
||
+
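// The scanner itself is internal, but the token streams described above drive
// the public API directly. A minimal sketch checking the last example (a
// non-indented sequence inside a block mapping), assuming only the public
// yaml.v3 Unmarshal entry point:
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var doc map[string][]string
	if err := yaml.Unmarshal([]byte("key:\n- item 1\n- item 2\n"), &doc); err != nil {
		panic(err)
	}
	fmt.Println(doc["key"]) // [item 1 item 2]
}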
+// Ensure that the buffer contains the required number of characters.
|
||
+// Return true on success, false on failure (reader error or memory error).
|
||
+func cache(parser *yaml_parser_t, length int) bool {
|
||
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
|
||
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
|
||
+}
|
||
+
|
||
+// Advance the buffer pointer.
|
||
+func skip(parser *yaml_parser_t) {
|
||
+ if !is_blank(parser.buffer, parser.buffer_pos) {
|
||
+ parser.newlines = 0
|
||
+ }
|
||
+ parser.mark.index++
|
||
+ parser.mark.column++
|
||
+ parser.unread--
|
||
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
|
||
+}
|
||
+
|
||
+func skip_line(parser *yaml_parser_t) {
|
||
+ if is_crlf(parser.buffer, parser.buffer_pos) {
|
||
+ parser.mark.index += 2
|
||
+ parser.mark.column = 0
|
||
+ parser.mark.line++
|
||
+ parser.unread -= 2
|
||
+ parser.buffer_pos += 2
|
||
+ parser.newlines++
|
||
+ } else if is_break(parser.buffer, parser.buffer_pos) {
|
||
+ parser.mark.index++
|
||
+ parser.mark.column = 0
|
||
+ parser.mark.line++
|
||
+ parser.unread--
|
||
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
|
||
+ parser.newlines++
|
||
+ }
|
||
+}
|
||
+
|
||
+// Copy a character to a string buffer and advance pointers.
|
||
+func read(parser *yaml_parser_t, s []byte) []byte {
|
||
+ if !is_blank(parser.buffer, parser.buffer_pos) {
|
||
+ parser.newlines = 0
|
||
+ }
|
||
+ w := width(parser.buffer[parser.buffer_pos])
|
||
+ if w == 0 {
|
||
+ panic("invalid character sequence")
|
||
+ }
|
||
+ if len(s) == 0 {
|
||
+ s = make([]byte, 0, 32)
|
||
+ }
|
||
+ if w == 1 && len(s)+w <= cap(s) {
|
||
+ s = s[:len(s)+1]
|
||
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
|
||
+ parser.buffer_pos++
|
||
+ } else {
|
||
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
|
||
+ parser.buffer_pos += w
|
||
+ }
|
||
+ parser.mark.index++
|
||
+ parser.mark.column++
|
||
+ parser.unread--
|
||
+ return s
|
||
+}
|
||
+
|
||
+// Copy a line break character to a string buffer and advance pointers.
|
||
+func read_line(parser *yaml_parser_t, s []byte) []byte {
|
||
+ buf := parser.buffer
|
||
+ pos := parser.buffer_pos
|
||
+ switch {
|
||
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
|
||
+ // CR LF . LF
|
||
+ s = append(s, '\n')
|
||
+ parser.buffer_pos += 2
|
||
+ parser.mark.index++
|
||
+ parser.unread--
|
||
+ case buf[pos] == '\r' || buf[pos] == '\n':
|
||
+ // CR|LF . LF
|
||
+ s = append(s, '\n')
|
||
+ parser.buffer_pos += 1
|
||
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
|
||
+ // NEL . LF
|
||
+ s = append(s, '\n')
|
||
+ parser.buffer_pos += 2
|
||
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
|
||
+ // LS|PS . LS|PS
|
||
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
|
||
+ parser.buffer_pos += 3
|
||
+ default:
|
||
+ return s
|
||
+ }
|
||
+ parser.mark.index++
|
||
+ parser.mark.column = 0
|
||
+ parser.mark.line++
|
||
+ parser.unread--
|
||
+ parser.newlines++
|
||
+ return s
|
||
+}
|
||
+
|
||
+// Get the next token.
|
||
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
|
||
+ // Erase the token object.
|
||
+ *token = yaml_token_t{} // [Go] Is this necessary?
|
||
+
|
||
+ // No tokens after STREAM-END or error.
|
||
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
|
||
+ return true
|
||
+ }
|
||
+
|
||
+ // Ensure that the tokens queue contains enough tokens.
|
||
+ if !parser.token_available {
|
||
+ if !yaml_parser_fetch_more_tokens(parser) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // Fetch the next token from the queue.
|
||
+ *token = parser.tokens[parser.tokens_head]
|
||
+ parser.tokens_head++
|
||
+ parser.tokens_parsed++
|
||
+ parser.token_available = false
|
||
+
|
||
+ if token.typ == yaml_STREAM_END_TOKEN {
|
||
+ parser.stream_end_produced = true
|
||
+ }
|
||
+ return true
|
||
+}
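// A sketch of the pull loop a caller inside this package would use; the
// helper name drainTokens is hypothetical, but the contract mirrors the
// function above: false is returned only when fetching fails with a scanner
// error, and yaml_STREAM_END_TOKEN is emitted exactly once at end of input.
func drainTokens(parser *yaml_parser_t) bool {
	var tok yaml_token_t
	for {
		if !yaml_parser_scan(parser, &tok) {
			// parser.problem and parser.problem_mark describe the failure.
			return false
		}
		if tok.typ == yaml_STREAM_END_TOKEN {
			return true
		}
		// ... hand tok to the parsing state machine ...
	}
}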
+
|
||
+// Set the scanner error and return false.
|
||
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
|
||
+ parser.error = yaml_SCANNER_ERROR
|
||
+ parser.context = context
|
||
+ parser.context_mark = context_mark
|
||
+ parser.problem = problem
|
||
+ parser.problem_mark = parser.mark
|
||
+ return false
|
||
+}
|
||
+
|
||
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
|
||
+ context := "while parsing a tag"
|
||
+ if directive {
|
||
+ context = "while parsing a %TAG directive"
|
||
+ }
|
||
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
|
||
+}
|
||
+
|
||
+func trace(args ...interface{}) func() {
|
||
+ pargs := append([]interface{}{"+++"}, args...)
|
||
+ fmt.Println(pargs...)
|
||
+ pargs = append([]interface{}{"---"}, args...)
|
||
+ return func() { fmt.Println(pargs...) }
|
||
+}
|
||
+
|
||
+// Ensure that the tokens queue contains at least one token which can be
|
||
+// returned to the Parser.
|
||
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
|
||
+ // While we need more tokens to fetch, do it.
|
||
+ for {
|
||
+ // [Go] The comment parsing logic requires a lookahead of two tokens
|
||
+ // so that foot comments may be parsed in time to associate them
|
||
+ // with the tokens that are parsed before them, and also for line
|
||
+ // comments to be transformed into head comments in some edge cases.
|
||
+ if parser.tokens_head < len(parser.tokens)-2 {
|
||
+ // If a potential simple key is at the head position, we need to fetch
|
||
+ // the next token to disambiguate it.
|
||
+ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
|
||
+ if !ok {
|
||
+ break
|
||
+ } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
|
||
+ return false
|
||
+ } else if !valid {
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+ // Fetch the next token.
|
||
+ if !yaml_parser_fetch_next_token(parser) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ parser.token_available = true
|
||
+ return true
|
||
+}
|
||
+
|
||
+// The dispatcher for token fetchers.
|
||
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) {
|
||
+ // Ensure that the buffer is initialized.
|
||
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Check if we just started scanning. Fetch STREAM-START then.
|
||
+ if !parser.stream_start_produced {
|
||
+ return yaml_parser_fetch_stream_start(parser)
|
||
+ }
|
||
+
|
||
+ scan_mark := parser.mark
|
||
+
|
||
+ // Eat whitespaces and comments until we reach the next token.
|
||
+ if !yaml_parser_scan_to_next_token(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // [Go] While unrolling indents, transform the head comments of prior
|
||
+ // indentation levels observed after scan_start into foot comments at
|
||
+ // the respective indexes.
|
||
+
|
||
+ // Check the indentation level against the current column.
|
||
+ if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
|
||
+ // of the longest indicators ('--- ' and '... ').
|
||
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Is it the end of the stream?
|
||
+ if is_z(parser.buffer, parser.buffer_pos) {
|
||
+ return yaml_parser_fetch_stream_end(parser)
|
||
+ }
|
||
+
|
||
+ // Is it a directive?
|
||
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
|
||
+ return yaml_parser_fetch_directive(parser)
|
||
+ }
|
||
+
|
||
+ buf := parser.buffer
|
||
+ pos := parser.buffer_pos
|
||
+
|
||
+ // Is it the document start indicator?
|
||
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
|
||
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
|
||
+ }
|
||
+
|
||
+ // Is it the document end indicator?
|
||
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
|
||
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
|
||
+ }
|
||
+
|
||
+ comment_mark := parser.mark
|
||
+ if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') {
|
||
+ // Associate any following comments with the prior token.
|
||
+ comment_mark = parser.tokens[len(parser.tokens)-1].start_mark
|
||
+ }
|
||
+ defer func() {
|
||
+ if !ok {
|
||
+ return
|
||
+ }
|
||
+ if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN {
|
||
+ // Sequence indicators alone have no line comments; the comment
+ // becomes a head comment for whatever follows.
|
||
+ return
|
||
+ }
|
||
+ if !yaml_parser_scan_line_comment(parser, comment_mark) {
|
||
+ ok = false
|
||
+ return
|
||
+ }
|
||
+ }()
|
||
+
|
||
+ // Is it the flow sequence start indicator?
|
||
+ if buf[pos] == '[' {
|
||
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
|
||
+ }
|
||
+
|
||
+ // Is it the flow mapping start indicator?
|
||
+ if parser.buffer[parser.buffer_pos] == '{' {
|
||
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
|
||
+ }
|
||
+
|
||
+ // Is it the flow sequence end indicator?
|
||
+ if parser.buffer[parser.buffer_pos] == ']' {
|
||
+ return yaml_parser_fetch_flow_collection_end(parser,
|
||
+ yaml_FLOW_SEQUENCE_END_TOKEN)
|
||
+ }
|
||
+
|
||
+ // Is it the flow mapping end indicator?
|
||
+ if parser.buffer[parser.buffer_pos] == '}' {
|
||
+ return yaml_parser_fetch_flow_collection_end(parser,
|
||
+ yaml_FLOW_MAPPING_END_TOKEN)
|
||
+ }
|
||
+
|
||
+ // Is it the flow entry indicator?
|
||
+ if parser.buffer[parser.buffer_pos] == ',' {
|
||
+ return yaml_parser_fetch_flow_entry(parser)
|
||
+ }
|
||
+
|
||
+ // Is it the block entry indicator?
|
||
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
|
||
+ return yaml_parser_fetch_block_entry(parser)
|
||
+ }
|
||
+
|
||
+ // Is it the key indicator?
|
||
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
|
||
+ return yaml_parser_fetch_key(parser)
|
||
+ }
|
||
+
|
||
+ // Is it the value indicator?
|
||
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
|
||
+ return yaml_parser_fetch_value(parser)
|
||
+ }
|
||
+
|
||
+ // Is it an alias?
|
||
+ if parser.buffer[parser.buffer_pos] == '*' {
|
||
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
|
||
+ }
|
||
+
|
||
+ // Is it an anchor?
|
||
+ if parser.buffer[parser.buffer_pos] == '&' {
|
||
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
|
||
+ }
|
||
+
|
||
+ // Is it a tag?
|
||
+ if parser.buffer[parser.buffer_pos] == '!' {
|
||
+ return yaml_parser_fetch_tag(parser)
|
||
+ }
|
||
+
|
||
+ // Is it a literal scalar?
|
||
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
|
||
+ return yaml_parser_fetch_block_scalar(parser, true)
|
||
+ }
|
||
+
|
||
+ // Is it a folded scalar?
|
||
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
|
||
+ return yaml_parser_fetch_block_scalar(parser, false)
|
||
+ }
|
||
+
|
||
+ // Is it a single-quoted scalar?
|
||
+ if parser.buffer[parser.buffer_pos] == '\'' {
|
||
+ return yaml_parser_fetch_flow_scalar(parser, true)
|
||
+ }
|
||
+
|
||
+ // Is it a double-quoted scalar?
|
||
+ if parser.buffer[parser.buffer_pos] == '"' {
|
||
+ return yaml_parser_fetch_flow_scalar(parser, false)
|
||
+ }
|
||
+
|
||
+ // Is it a plain scalar?
|
||
+ //
|
||
+ // A plain scalar may start with any non-blank characters except
|
||
+ //
|
||
+ // '-', '?', ':', ',', '[', ']', '{', '}',
|
||
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
|
||
+ // '%', '@', '`'.
|
||
+ //
|
||
+ // In the block context (and, for the '-' indicator, in the flow context
|
||
+ // too), it may also start with the characters
|
||
+ //
|
||
+ // '-', '?', ':'
|
||
+ //
|
||
+ // if it is followed by a non-space character.
|
||
+ //
|
||
+ // The last rule is more restrictive than the specification requires.
|
||
+ // [Go] TODO Make this logic more reasonable.
|
||
+ //switch parser.buffer[parser.buffer_pos] {
|
||
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
|
||
+ //}
|
||
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
|
||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
|
||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
|
||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
|
||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
|
||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
|
||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
|
||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
|
||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
|
||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
|
||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
|
||
+ (parser.flow_level == 0 &&
|
||
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
|
||
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
|
||
+ return yaml_parser_fetch_plain_scalar(parser)
|
||
+ }
|
||
+
|
||
+ // If we haven't determined the token type by now, it is an error.
|
||
+ return yaml_parser_set_scanner_error(parser,
|
||
+ "while scanning for the next token", parser.mark,
|
||
+ "found character that cannot start any token")
|
||
+}
|
||
+
|
||
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
|
||
+ if !simple_key.possible {
|
||
+ return false, true
|
||
+ }
|
||
+
|
||
+ // The 1.2 specification says:
|
||
+ //
|
||
+ // "If the ? indicator is omitted, parsing needs to see past the
|
||
+ // implicit key to recognize it as such. To limit the amount of
|
||
+ // lookahead required, the “:” indicator must appear at most 1024
|
||
+ // Unicode characters beyond the start of the key. In addition, the key
|
||
+ // is restricted to a single line."
|
||
+ //
|
||
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
|
||
+ // Check if the potential simple key to be removed is required.
|
||
+ if simple_key.required {
|
||
+ return false, yaml_parser_set_scanner_error(parser,
|
||
+ "while scanning a simple key", simple_key.mark,
|
||
+ "could not find expected ':'")
|
||
+ }
|
||
+ simple_key.possible = false
|
||
+ return false, true
|
||
+ }
|
||
+ return true, true
|
||
+}
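// The 1024-character window checked above is visible from the public API: a
// plain key whose ':' arrives too late is rejected. A minimal sketch, assuming
// only public yaml.v3 Unmarshal (the input is deliberately pathological):
package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v3"
)

func main() {
	var v map[string]string
	long := strings.Repeat("x", 2000) + ": value\n"
	err := yaml.Unmarshal([]byte(long), &v)
	fmt.Println(err != nil) // true: "could not find expected ':'"
}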
+
|
||
+// Check if a simple key may start at the current position and add it if
|
||
+// needed.
|
||
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
|
||
+ // A simple key is required at the current position if the scanner is in
|
||
+ // the block context and the current column coincides with the indentation
|
||
+ // level.
|
||
+
|
||
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
|
||
+
|
||
+ //
|
||
+ // If the current position may start a simple key, save it.
|
||
+ //
|
||
+ if parser.simple_key_allowed {
|
||
+ simple_key := yaml_simple_key_t{
|
||
+ possible: true,
|
||
+ required: required,
|
||
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
|
||
+ mark: parser.mark,
|
||
+ }
|
||
+
|
||
+ if !yaml_parser_remove_simple_key(parser) {
|
||
+ return false
|
||
+ }
|
||
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
|
||
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Remove a potential simple key at the current flow level.
|
||
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
|
||
+ i := len(parser.simple_keys) - 1
|
||
+ if parser.simple_keys[i].possible {
|
||
+ // If the key is required, it is an error.
|
||
+ if parser.simple_keys[i].required {
|
||
+ return yaml_parser_set_scanner_error(parser,
|
||
+ "while scanning a simple key", parser.simple_keys[i].mark,
|
||
+ "could not find expected ':'")
|
||
+ }
|
||
+ // Remove the key from the stack.
|
||
+ parser.simple_keys[i].possible = false
|
||
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// max_flow_level limits the flow_level
|
||
+const max_flow_level = 10000
|
||
+
|
||
+// Increase the flow level and resize the simple key list if needed.
|
||
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
|
||
+ // Reset the simple key on the next level.
|
||
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
|
||
+ possible: false,
|
||
+ required: false,
|
||
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
|
||
+ mark: parser.mark,
|
||
+ })
|
||
+
|
||
+ // Increase the flow level.
|
||
+ parser.flow_level++
|
||
+ if parser.flow_level > max_flow_level {
|
||
+ return yaml_parser_set_scanner_error(parser,
|
||
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
|
||
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Decrease the flow level.
|
||
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
|
||
+ if parser.flow_level > 0 {
|
||
+ parser.flow_level--
|
||
+ last := len(parser.simple_keys) - 1
|
||
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
|
||
+ parser.simple_keys = parser.simple_keys[:last]
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// max_indents limits the indents stack size
|
||
+const max_indents = 10000
|
||
+
|
||
+// Push the current indentation level to the stack and set the new level
|
||
+// if the current column is greater than the indentation level. In this case,
|
||
+// append or insert the specified token into the token queue.
|
||
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
|
||
+ // In the flow context, do nothing.
|
||
+ if parser.flow_level > 0 {
|
||
+ return true
|
||
+ }
|
||
+
|
||
+ if parser.indent < column {
|
||
+ // Push the current indentation level to the stack and set the new
|
||
+ // indentation level.
|
||
+ parser.indents = append(parser.indents, parser.indent)
|
||
+ parser.indent = column
|
||
+ if len(parser.indents) > max_indents {
|
||
+ return yaml_parser_set_scanner_error(parser,
|
||
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
|
||
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
|
||
+ }
|
||
+
|
||
+ // Create a token and insert it into the queue.
|
||
+ token := yaml_token_t{
|
||
+ typ: typ,
|
||
+ start_mark: mark,
|
||
+ end_mark: mark,
|
||
+ }
|
||
+ if number > -1 {
|
||
+ number -= parser.tokens_parsed
|
||
+ }
|
||
+ yaml_insert_token(parser, number, &token)
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Pop indentation levels from the indents stack until the current level
|
||
+// becomes less than or equal to the column. For each indentation level, append
|
||
+// the BLOCK-END token.
|
||
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
|
||
+ // In the flow context, do nothing.
|
||
+ if parser.flow_level > 0 {
|
||
+ return true
|
||
+ }
|
||
+
|
||
+ block_mark := scan_mark
|
||
+ block_mark.index--
|
||
+
|
||
+ // Loop through the indentation levels in the stack.
|
||
+ for parser.indent > column {
|
||
+
|
||
+ // [Go] Reposition the end token before potential following
|
||
+ // foot comments of parent blocks. For that, search
|
||
+ // backwards for recent comments that were at the same
|
||
+ // indent as the block that is ending now.
|
||
+ stop_index := block_mark.index
|
||
+ for i := len(parser.comments) - 1; i >= 0; i-- {
|
||
+ comment := &parser.comments[i]
|
||
+
|
||
+ if comment.end_mark.index < stop_index {
|
||
+ // Don't go back beyond the start of the comment/whitespace scan, unless column < 0.
|
||
+ // If requested indent column is < 0, then the document is over and everything else
|
||
+ // is a foot anyway.
|
||
+ break
|
||
+ }
|
||
+ if comment.start_mark.column == parser.indent+1 {
|
||
+ // This is a good match. But maybe there's a former comment
|
||
+ // at that same indent level, so keep searching.
|
||
+ block_mark = comment.start_mark
|
||
+ }
|
||
+
|
||
+ // While the end of the former comment matches with
|
||
+ // the start of the following one, we know there's
|
||
+ // nothing in between and scanning is still safe.
|
||
+ stop_index = comment.scan_mark.index
|
||
+ }
|
||
+
|
||
+ // Create a token and append it to the queue.
|
||
+ token := yaml_token_t{
|
||
+ typ: yaml_BLOCK_END_TOKEN,
|
||
+ start_mark: block_mark,
|
||
+ end_mark: block_mark,
|
||
+ }
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+
|
||
+ // Pop the indentation level.
|
||
+ parser.indent = parser.indents[len(parser.indents)-1]
|
||
+ parser.indents = parser.indents[:len(parser.indents)-1]
|
||
+ }
|
||
+ return true
|
||
+}
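// The unroll loop in miniature, with illustrative values rather than real
// parser state: each popped indentation level corresponds to one BLOCK-END.
package main

import "fmt"

func main() {
	indents := []int{-1, 0} // previously pushed levels
	indent, column := 2, -1 // current level; column -1 means end of document
	for indent > column {
		fmt.Println("BLOCK-END at indent", indent)
		indent = indents[len(indents)-1]
		indents = indents[:len(indents)-1]
	}
}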
+
|
||
+// Initialize the scanner and produce the STREAM-START token.
|
||
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
|
||
+
|
||
+ // Set the initial indentation.
|
||
+ parser.indent = -1
|
||
+
|
||
+ // Initialize the simple key stack.
|
||
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
|
||
+
|
||
+ parser.simple_keys_by_tok = make(map[int]int)
|
||
+
|
||
+ // A simple key is allowed at the beginning of the stream.
|
||
+ parser.simple_key_allowed = true
|
||
+
|
||
+ // We have started.
|
||
+ parser.stream_start_produced = true
|
||
+
|
||
+ // Create the STREAM-START token and append it to the queue.
|
||
+ token := yaml_token_t{
|
||
+ typ: yaml_STREAM_START_TOKEN,
|
||
+ start_mark: parser.mark,
|
||
+ end_mark: parser.mark,
|
||
+ encoding: parser.encoding,
|
||
+ }
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Produce the STREAM-END token and shut down the scanner.
|
||
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
|
||
+
|
||
+ // Force new line.
|
||
+ if parser.mark.column != 0 {
|
||
+ parser.mark.column = 0
|
||
+ parser.mark.line++
|
||
+ }
|
||
+
|
||
+ // Reset the indentation level.
|
||
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Reset simple keys.
|
||
+ if !yaml_parser_remove_simple_key(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ parser.simple_key_allowed = false
|
||
+
|
||
+ // Create the STREAM-END token and append it to the queue.
|
||
+ token := yaml_token_t{
|
||
+ typ: yaml_STREAM_END_TOKEN,
|
||
+ start_mark: parser.mark,
|
||
+ end_mark: parser.mark,
|
||
+ }
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
|
||
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
|
||
+ // Reset the indentation level.
|
||
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Reset simple keys.
|
||
+ if !yaml_parser_remove_simple_key(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ parser.simple_key_allowed = false
|
||
+
|
||
+ // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
|
||
+ token := yaml_token_t{}
|
||
+ if !yaml_parser_scan_directive(parser, &token) {
|
||
+ return false
|
||
+ }
|
||
+ // Append the token to the queue.
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Produce the DOCUMENT-START or DOCUMENT-END token.
|
||
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
|
||
+ // Reset the indentation level.
|
||
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Reset simple keys.
|
||
+ if !yaml_parser_remove_simple_key(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ parser.simple_key_allowed = false
|
||
+
|
||
+ // Consume the token.
|
||
+ start_mark := parser.mark
|
||
+
|
||
+ skip(parser)
|
||
+ skip(parser)
|
||
+ skip(parser)
|
||
+
|
||
+ end_mark := parser.mark
|
||
+
|
||
+ // Create the DOCUMENT-START or DOCUMENT-END token.
|
||
+ token := yaml_token_t{
|
||
+ typ: typ,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ }
|
||
+ // Append the token to the queue.
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
|
||
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
|
||
+
|
||
+ // The indicators '[' and '{' may start a simple key.
|
||
+ if !yaml_parser_save_simple_key(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Increase the flow level.
|
||
+ if !yaml_parser_increase_flow_level(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // A simple key may follow the indicators '[' and '{'.
|
||
+ parser.simple_key_allowed = true
|
||
+
|
||
+ // Consume the token.
|
||
+ start_mark := parser.mark
|
||
+ skip(parser)
|
||
+ end_mark := parser.mark
|
||
+
|
||
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
|
||
+ token := yaml_token_t{
|
||
+ typ: typ,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ }
|
||
+ // Append the token to the queue.
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
|
||
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
|
||
+ // Reset any potential simple key on the current flow level.
|
||
+ if !yaml_parser_remove_simple_key(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Decrease the flow level.
|
||
+ if !yaml_parser_decrease_flow_level(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // No simple keys after the indicators ']' and '}'.
|
||
+ parser.simple_key_allowed = false
|
||
+
|
||
+ // Consume the token.
|
||
+
|
||
+ start_mark := parser.mark
|
||
+ skip(parser)
|
||
+ end_mark := parser.mark
|
||
+
|
||
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
|
||
+ token := yaml_token_t{
|
||
+ typ: typ,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ }
|
||
+ // Append the token to the queue.
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Produce the FLOW-ENTRY token.
|
||
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
|
||
+ // Reset any potential simple keys on the current flow level.
|
||
+ if !yaml_parser_remove_simple_key(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Simple keys are allowed after ','.
|
||
+ parser.simple_key_allowed = true
|
||
+
|
||
+ // Consume the token.
|
||
+ start_mark := parser.mark
|
||
+ skip(parser)
|
||
+ end_mark := parser.mark
|
||
+
|
||
+ // Create the FLOW-ENTRY token and append it to the queue.
|
||
+ token := yaml_token_t{
|
||
+ typ: yaml_FLOW_ENTRY_TOKEN,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ }
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Produce the BLOCK-ENTRY token.
|
||
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
|
||
+ // Check if the scanner is in the block context.
|
||
+ if parser.flow_level == 0 {
|
||
+ // Check if we are allowed to start a new entry.
|
||
+ if !parser.simple_key_allowed {
|
||
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
|
||
+ "block sequence entries are not allowed in this context")
|
||
+ }
|
||
+ // Add the BLOCK-SEQUENCE-START token if needed.
|
||
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
|
||
+ return false
|
||
+ }
|
||
+ } else {
|
||
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report it, because the Parser
+ // is able to point to the context.
|
||
+ }
|
||
+
|
||
+ // Reset any potential simple keys on the current flow level.
|
||
+ if !yaml_parser_remove_simple_key(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Simple keys are allowed after '-'.
|
||
+ parser.simple_key_allowed = true
|
||
+
|
||
+ // Consume the token.
|
||
+ start_mark := parser.mark
|
||
+ skip(parser)
|
||
+ end_mark := parser.mark
|
||
+
|
||
+ // Create the BLOCK-ENTRY token and append it to the queue.
|
||
+ token := yaml_token_t{
|
||
+ typ: yaml_BLOCK_ENTRY_TOKEN,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ }
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Produce the KEY token.
|
||
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
|
||
+
|
||
+ // In the block context, additional checks are required.
|
||
+ if parser.flow_level == 0 {
|
||
+ // Check if we are allowed to start a new key (not necessarily simple).
|
||
+ if !parser.simple_key_allowed {
|
||
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
|
||
+ "mapping keys are not allowed in this context")
|
||
+ }
|
||
+ // Add the BLOCK-MAPPING-START token if needed.
|
||
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // Reset any potential simple keys on the current flow level.
|
||
+ if !yaml_parser_remove_simple_key(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Simple keys are allowed after '?' in the block context.
|
||
+ parser.simple_key_allowed = parser.flow_level == 0
|
||
+
|
||
+ // Consume the token.
|
||
+ start_mark := parser.mark
|
||
+ skip(parser)
|
||
+ end_mark := parser.mark
|
||
+
|
||
+ // Create the KEY token and append it to the queue.
|
||
+ token := yaml_token_t{
|
||
+ typ: yaml_KEY_TOKEN,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ }
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Produce the VALUE token.
|
||
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
|
||
+
|
||
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
|
||
+
|
||
+ // Have we found a simple key?
|
||
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
|
||
+ return false
|
||
+
|
||
+ } else if valid {
|
||
+
|
||
+ // Create the KEY token and insert it into the queue.
|
||
+ token := yaml_token_t{
|
||
+ typ: yaml_KEY_TOKEN,
|
||
+ start_mark: simple_key.mark,
|
||
+ end_mark: simple_key.mark,
|
||
+ }
|
||
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
|
||
+
|
||
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
|
||
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
|
||
+ simple_key.token_number,
|
||
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Remove the simple key.
|
||
+ simple_key.possible = false
|
||
+ delete(parser.simple_keys_by_tok, simple_key.token_number)
|
||
+
|
||
+ // A simple key cannot follow another simple key.
|
||
+ parser.simple_key_allowed = false
|
||
+
|
||
+ } else {
|
||
+ // The ':' indicator follows a complex key.
|
||
+
|
||
+ // In the block context, extra checks are required.
|
||
+ if parser.flow_level == 0 {
|
||
+
|
||
+ // Check if we are allowed to start a complex value.
|
||
+ if !parser.simple_key_allowed {
|
||
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
|
||
+ "mapping values are not allowed in this context")
|
||
+ }
|
||
+
|
||
+ // Add the BLOCK-MAPPING-START token if needed.
|
||
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // Simple keys after ':' are allowed in the block context.
|
||
+ parser.simple_key_allowed = parser.flow_level == 0
|
||
+ }
|
||
+
|
||
+ // Consume the token.
|
||
+ start_mark := parser.mark
|
||
+ skip(parser)
|
||
+ end_mark := parser.mark
|
||
+
|
||
+ // Create the VALUE token and append it to the queue.
|
||
+ token := yaml_token_t{
|
||
+ typ: yaml_VALUE_TOKEN,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ }
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Produce the ALIAS or ANCHOR token.
|
||
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
|
||
+ // An anchor or an alias could be a simple key.
|
||
+ if !yaml_parser_save_simple_key(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // A simple key cannot follow an anchor or an alias.
|
||
+ parser.simple_key_allowed = false
|
||
+
|
||
+ // Create the ALIAS or ANCHOR token and append it to the queue.
|
||
+ var token yaml_token_t
|
||
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
|
||
+ return false
|
||
+ }
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Produce the TAG token.
|
||
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
|
||
+ // A tag could be a simple key.
|
||
+ if !yaml_parser_save_simple_key(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // A simple key cannot follow a tag.
|
||
+ parser.simple_key_allowed = false
|
||
+
|
||
+ // Create the TAG token and append it to the queue.
|
||
+ var token yaml_token_t
|
||
+ if !yaml_parser_scan_tag(parser, &token) {
|
||
+ return false
|
||
+ }
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
|
||
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
|
||
+ // Remove any potential simple keys.
|
||
+ if !yaml_parser_remove_simple_key(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // A simple key may follow a block scalar.
|
||
+ parser.simple_key_allowed = true
|
||
+
|
||
+ // Create the SCALAR token and append it to the queue.
|
||
+ var token yaml_token_t
|
||
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
|
||
+ return false
|
||
+ }
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
|
||
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
|
||
+ // A flow scalar could be a simple key.
|
||
+ if !yaml_parser_save_simple_key(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // A simple key cannot follow a flow scalar.
|
||
+ parser.simple_key_allowed = false
|
||
+
|
||
+ // Create the SCALAR token and append it to the queue.
|
||
+ var token yaml_token_t
|
||
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
|
||
+ return false
|
||
+ }
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Produce the SCALAR(...,plain) token.
|
||
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
|
||
+ // A plain scalar could be a simple key.
|
||
+ if !yaml_parser_save_simple_key(parser) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // A simple key cannot follow a flow scalar.
|
||
+ parser.simple_key_allowed = false
|
||
+
|
||
+ // Create the SCALAR token and append it to the queue.
|
||
+ var token yaml_token_t
|
||
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
|
||
+ return false
|
||
+ }
|
||
+ yaml_insert_token(parser, -1, &token)
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Eat whitespaces and comments until the next token is found.
|
||
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
|
||
+
|
||
+ scan_mark := parser.mark
|
||
+
|
||
+ // Loop until the next token is found.
|
||
+ for {
|
||
+ // Allow the BOM mark to start a line.
|
||
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||
+ return false
|
||
+ }
|
||
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
|
||
+ skip(parser)
|
||
+ }
|
||
+
|
||
+ // Eat whitespaces.
|
||
+ // Tabs are allowed:
|
||
+ // - in the flow context
|
||
+ // - in the block context, but not at the beginning of the line or
|
||
+ // after '-', '?', or ':' (complex value).
|
||
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
|
||
+ skip(parser)
|
||
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // Check if we just had a line comment under a sequence entry that
|
||
+ // looks more like a header to the following content. Similar to this:
|
||
+ //
|
||
+ // - # The comment
|
||
+ // - Some data
|
||
+ //
|
||
+ // If so, transform the line comment to a head comment and reposition.
|
||
+ if len(parser.comments) > 0 && len(parser.tokens) > 1 {
|
||
+ tokenA := parser.tokens[len(parser.tokens)-2]
|
||
+ tokenB := parser.tokens[len(parser.tokens)-1]
|
||
+ comment := &parser.comments[len(parser.comments)-1]
|
||
+ if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
|
||
+ // If it was in the prior line, reposition so it becomes a
|
||
+ // header of the follow up token. Otherwise, keep it in place
|
||
+ // so it becomes a header of the former.
|
||
+ comment.head = comment.line
|
||
+ comment.line = nil
|
||
+ if comment.start_mark.line == parser.mark.line-1 {
|
||
+ comment.token_mark = parser.mark
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // Eat a comment until a line break.
|
||
+ if parser.buffer[parser.buffer_pos] == '#' {
|
||
+ if !yaml_parser_scan_comments(parser, scan_mark) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // If it is a line break, eat it.
|
||
+ if is_break(parser.buffer, parser.buffer_pos) {
|
||
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
|
||
+ return false
|
||
+ }
|
||
+ skip_line(parser)
|
||
+
|
||
+ // In the block context, a new line may start a simple key.
|
||
+ if parser.flow_level == 0 {
|
||
+ parser.simple_key_allowed = true
|
||
+ }
|
||
+ } else {
|
||
+ break // We have found a token.
|
||
+ }
|
||
+ }
|
||
+
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
|
||
+//
|
||
+// Scope:
|
||
+// %YAML 1.1 # a comment \n
|
||
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||
+// %TAG !yaml! tag:yaml.org,2002: \n
|
||
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||
+//
|
||
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
|
||
+ // Eat '%'.
|
||
+ start_mark := parser.mark
|
||
+ skip(parser)
|
||
+
|
||
+ // Scan the directive name.
|
||
+ var name []byte
|
||
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Is it a YAML directive?
|
||
+ if bytes.Equal(name, []byte("YAML")) {
|
||
+ // Scan the VERSION directive value.
|
||
+ var major, minor int8
|
||
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
|
||
+ return false
|
||
+ }
|
||
+ end_mark := parser.mark
|
||
+
|
||
+ // Create a VERSION-DIRECTIVE token.
|
||
+ *token = yaml_token_t{
|
||
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ major: major,
|
||
+ minor: minor,
|
||
+ }
|
||
+
|
||
+ // Is it a TAG directive?
|
||
+ } else if bytes.Equal(name, []byte("TAG")) {
|
||
+ // Scan the TAG directive value.
|
||
+ var handle, prefix []byte
|
||
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
|
||
+ return false
|
||
+ }
|
||
+ end_mark := parser.mark
|
||
+
|
||
+ // Create a TAG-DIRECTIVE token.
|
||
+ *token = yaml_token_t{
|
||
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
|
||
+ start_mark: start_mark,
|
||
+ end_mark: end_mark,
|
||
+ value: handle,
|
||
+ prefix: prefix,
|
||
+ }
|
||
+
|
||
+ // Unknown directive.
|
||
+ } else {
|
||
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
|
||
+ start_mark, "found unknown directive name")
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Eat the rest of the line including any comments.
|
||
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ for is_blank(parser.buffer, parser.buffer_pos) {
|
||
+ skip(parser)
|
||
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if parser.buffer[parser.buffer_pos] == '#' {
|
||
+ // [Go] Discard this inline comment for the time being.
|
||
+ //if !yaml_parser_scan_line_comment(parser, start_mark) {
|
||
+ // return false
|
||
+ //}
|
||
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
|
||
+ skip(parser)
|
||
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // Check if we are at the end of the line.
|
||
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
|
||
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
|
||
+ start_mark, "did not find expected comment or line break")
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Eat a line break.
|
||
+ if is_break(parser.buffer, parser.buffer_pos) {
|
||
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
|
||
+ return false
|
||
+ }
|
||
+ skip_line(parser)
|
||
+ }
|
||
+
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Scan the directive name.
|
||
+//
|
||
+// Scope:
|
||
+// %YAML 1.1 # a comment \n
|
||
+// ^^^^
|
||
+// %TAG !yaml! tag:yaml.org,2002: \n
|
||
+// ^^^
|
||
+//
|
||
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
|
||
+ // Consume the directive name.
|
||
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ var s []byte
|
||
+ for is_alpha(parser.buffer, parser.buffer_pos) {
|
||
+ s = read(parser, s)
|
||
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // Check if the name is empty.
|
||
+ if len(s) == 0 {
|
||
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
|
||
+ start_mark, "could not find expected directive name")
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Check for a blank character after the name.
|
||
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
|
||
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
|
||
+ start_mark, "found unexpected non-alphabetical character")
|
||
+ return false
|
||
+ }
|
||
+ *name = s
|
||
+ return true
|
||
+}
|
||
+
|
||
+// Scan the value of VERSION-DIRECTIVE.
|
||
+//
|
||
+// Scope:
|
||
+// %YAML 1.1 # a comment \n
|
||
+// ^^^^^^
|
||
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
|
||
+ // Eat whitespaces.
|
||
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||
+ return false
|
||
+ }
|
||
+ for is_blank(parser.buffer, parser.buffer_pos) {
|
||
+ skip(parser)
|
||
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // Consume the major version number.
|
||
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ // Eat '.'.
|
||
+ if parser.buffer[parser.buffer_pos] != '.' {
|
||
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
|
||
+ start_mark, "did not find expected digit or '.' character")
|
||
+ }
|
||
+
|
||
+ skip(parser)
|
||
+
|
||
+ // Consume the minor version number.
|
||
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
|
||
+ return false
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+const max_number_length = 2
|
||
+
|
||
+// Scan the version number of VERSION-DIRECTIVE.
|
||
+//
|
||
+// Scope:
|
||
+// %YAML 1.1 # a comment \n
|
||
+// ^
|
||
+// %YAML 1.1 # a comment \n
|
||
+// ^
|
||
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
|
||
+
|
||
+ // Repeat while the next character is digit.
|
||
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||
+ return false
|
||
+ }
|
||
+ var value, length int8
|
||
+ for is_digit(parser.buffer, parser.buffer_pos) {
|
||
+ // Check if the number is too long.
|
||
+ length++
|
||
+ if length > max_number_length {
|
||
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
|
||
+ start_mark, "found extremely long version number")
|
||
+ }
|
||
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
|
||
+ skip(parser)
|
||
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // Check if the number was present.
|
||
+ if length == 0 {
|
||
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
|
||
+ start_mark, "did not find expected version number")
|
||
+ }
|
||
+ *number = value
|
||
+ return true
|
||
+}
|
||
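The digit loop above accumulates into an int8 and rejects components longer than max_number_length (two digits) before they could overflow. The same accumulation, restated on a plain byte slice as a self-contained sketch (parseVersionNumber is a hypothetical helper, not part of the package):

    package main

    import (
        "errors"
        "fmt"
    )

    // parseVersionNumber mirrors the loop above: it accumulates at most
    // two decimal digits into an int8 and reports the two error cases
    // the scanner can hit.
    func parseVersionNumber(b []byte) (int8, error) {
        var value, length int8
        for i := 0; i < len(b) && b[i] >= '0' && b[i] <= '9'; i++ {
            length++
            if length > 2 { // max_number_length
                return 0, errors.New("found extremely long version number")
            }
            value = value*10 + int8(b[i]-'0')
        }
        if length == 0 {
            return 0, errors.New("did not find expected version number")
        }
        return value, nil
    }

    func main() {
        fmt.Println(parseVersionNumber([]byte("12")))  // 12 <nil>
        fmt.Println(parseVersionNumber([]byte("123"))) // error: too long
    }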
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+	var handle_value, prefix_value []byte
+
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a handle.
+	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+		return false
+	}
+
+	// Expect a whitespace.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace")
+		return false
+	}
+
+	// Eat whitespaces.
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a prefix.
+	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+		return false
+	}
+
+	// Expect a whitespace or line break.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	*handle = handle_value
+	*prefix = prefix_value
+	return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+	var s []byte
+
+	// Eat the indicator character.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the value.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	end_mark := parser.mark
+
+	/*
+	 * Check if length of the anchor is greater than 0 and it is followed by
+	 * a whitespace character or one of the indicators:
+	 *
+	 *      '?', ':', ',', ']', '}', '%', '@', '`'.
+	 */
+
+	if len(s) == 0 ||
+		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+			parser.buffer[parser.buffer_pos] == '`') {
+		context := "while scanning an alias"
+		if typ == yaml_ANCHOR_TOKEN {
+			context = "while scanning an anchor"
+		}
+		yaml_parser_set_scanner_error(parser, context, start_mark,
+			"did not find expected alphabetic or numeric character")
+		return false
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+	}
+
+	return true
+}
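yaml_parser_scan_anchor serves both ANCHOR (&name) and ALIAS (*name) tokens; only the typ argument differs. At the API level an alias simply re-resolves to the anchored node, as in this small example:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    func main() {
        // &base becomes an ANCHOR token, *base an ALIAS token; the
        // decoder resolves the alias (here via a merge key) back to
        // the anchored mapping.
        doc := []byte("defaults: &base\n  retries: 3\njob:\n  <<: *base\n")
        var out map[string]map[string]int
        if err := yaml.Unmarshal(doc, &out); err != nil {
            panic(err)
        }
        fmt.Println(out["job"]["retries"]) // 3
    }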
+
+/*
+ * Scan a TAG token.
+ */
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+	var handle, suffix []byte
+
+	start_mark := parser.mark
+
+	// Check if the tag is in the canonical form.
+	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+		return false
+	}
+
+	if parser.buffer[parser.buffer_pos+1] == '<' {
+		// Keep the handle as ''
+
+		// Eat '!<'
+		skip(parser)
+		skip(parser)
+
+		// Consume the tag value.
+		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+			return false
+		}
+
+		// Check for '>' and eat it.
+		if parser.buffer[parser.buffer_pos] != '>' {
+			yaml_parser_set_scanner_error(parser, "while scanning a tag",
+				start_mark, "did not find the expected '>'")
+			return false
+		}
+
+		skip(parser)
+	} else {
+		// The tag has either the '!suffix' or the '!handle!suffix' form.
+
+		// First, try to scan a handle.
+		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+			return false
+		}
+
+		// Check if it is, indeed, a handle.
+		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+			// Scan the suffix now.
+			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+				return false
+			}
+		} else {
+			// It wasn't a handle after all. Scan the rest of the tag.
+			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+				return false
+			}
+
+			// Set the handle to '!'.
+			handle = []byte{'!'}
+
+			// A special case: the '!' tag. Set the handle to '' and the
+			// suffix to '!'.
+			if len(suffix) == 0 {
+				handle, suffix = suffix, handle
+			}
+		}
+	}
+
+	// Check the character which ends the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a tag",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_TAG_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      handle,
+		suffix:     suffix,
+	}
+	return true
+}
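All three surface forms handled here, the verbatim !<...>, the shorthand !handle!suffix, and the bare !suffix, end up as a handle/suffix pair on the TAG token. A hedged sketch that inspects the resolved tag through yaml.Node:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    func main() {
        for _, src := range []string{
            "!!str 123",                    // '!handle!suffix' form
            "!<tag:yaml.org,2002:str> 123", // verbatim '!<...>' form
            "!local 123",                   // '!suffix' form
        } {
            var n yaml.Node
            if err := yaml.Unmarshal([]byte(src), &n); err != nil {
                panic(err)
            }
            // The first two print tag:yaml.org,2002:str, the third !local.
            fmt.Println(n.Content[0].Tag)
        }
    }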
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+	// Check the initial '!' character.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if parser.buffer[parser.buffer_pos] != '!' {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected '!'")
+		return false
+	}
+
+	var s []byte
+
+	// Copy the '!' character.
+	s = read(parser, s)
+
+	// Copy all subsequent alphabetical and numerical characters.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the trailing character is '!' and copy it.
+	if parser.buffer[parser.buffer_pos] == '!' {
+		s = read(parser, s)
+	} else {
+		// It's either the '!' tag or not really a tag handle. If it's a %TAG
+		// directive, it's an error. If it's a tag token, it must be a part of URI.
+		if directive && string(s) != "!" {
+			yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find expected '!'")
+			return false
+		}
+	}
+
+	*handle = s
+	return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+	//size_t length = head ? strlen((char *)head) : 0
+	var s []byte
+	hasTag := len(head) > 0
+
+	// Copy the head if needed.
+	//
+	// Note that we don't copy the leading '!' character.
+	if len(head) > 1 {
+		s = append(s, head[1:]...)
+	}
+
+	// Scan the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// The set of characters that may appear in URI is as follows:
+	//
+	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+	//      '%'.
+	// [Go] TODO Convert this into more reasonable logic.
+	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+		parser.buffer[parser.buffer_pos] == '%' {
+		// Check if it is a URI-escape sequence.
+		if parser.buffer[parser.buffer_pos] == '%' {
+			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+				return false
+			}
+		} else {
+			s = read(parser, s)
+		}
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		hasTag = true
+	}
+
+	if !hasTag {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected tag URI")
+		return false
+	}
+	*uri = s
+	return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+	// Decode the required number of characters.
+	w := 1024
+	for w > 0 {
+		// Check for a URI-escaped octet.
+		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+			return false
+		}
+
+		if !(parser.buffer[parser.buffer_pos] == '%' &&
+			is_hex(parser.buffer, parser.buffer_pos+1) &&
+			is_hex(parser.buffer, parser.buffer_pos+2)) {
+			return yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find URI escaped octet")
+		}
+
+		// Get the octet.
+		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+		// If it is the leading octet, determine the length of the UTF-8 sequence.
+		if w == 1024 {
+			w = width(octet)
+			if w == 0 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect leading UTF-8 octet")
+			}
+		} else {
+			// Check if the trailing octet is correct.
+			if octet&0xC0 != 0x80 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect trailing UTF-8 octet")
+			}
+		}
+
+		// Copy the octet and move the pointers.
+		*s = append(*s, octet)
+		skip(parser)
+		skip(parser)
+		skip(parser)
+		w--
+	}
+	return true
+}
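Note that the scanner validates the escaped octets as one well-formed UTF-8 sequence rather than decoding them blindly. The octet arithmetic in isolation (decodeOctet is a hypothetical helper for illustration):

    package main

    import "fmt"

    // decodeOctet mirrors the '%' branch above: two hex digits (assumed
    // valid, as the scanner checks is_hex first) become one byte.
    func decodeOctet(h, l byte) byte {
        hex := func(c byte) byte {
            switch {
            case c >= '0' && c <= '9':
                return c - '0'
            case c >= 'a' && c <= 'f':
                return c - 'a' + 10
            default:
                return c - 'A' + 10
            }
        }
        return hex(h)<<4 + hex(l)
    }

    func main() {
        // "%C3%A9" escapes U+00E9: 0xC3 announces a two-byte UTF-8
        // sequence and 0xA9 is a valid continuation (0xA9&0xC0 == 0x80).
        b := []byte{decodeOctet('C', '3'), decodeOctet('A', '9')}
        fmt.Println(string(b)) // é
    }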
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+	// Eat the indicator '|' or '>'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the additional block scalar indicators.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check for a chomping indicator.
+	var chomping, increment int
+	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+		// Set the chomping method and eat the indicator.
+		if parser.buffer[parser.buffer_pos] == '+' {
+			chomping = +1
+		} else {
+			chomping = -1
+		}
+		skip(parser)
+
+		// Check for an indentation indicator.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if is_digit(parser.buffer, parser.buffer_pos) {
+			// Check that the indentation is greater than 0.
+			if parser.buffer[parser.buffer_pos] == '0' {
+				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+					start_mark, "found an indentation indicator equal to 0")
+				return false
+			}
+
+			// Get the indentation level and eat the indicator.
+			increment = as_digit(parser.buffer, parser.buffer_pos)
+			skip(parser)
+		}
+
+	} else if is_digit(parser.buffer, parser.buffer_pos) {
+		// Do the same as above, but in the opposite order.
+
+		if parser.buffer[parser.buffer_pos] == '0' {
+			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found an indentation indicator equal to 0")
+			return false
+		}
+		increment = as_digit(parser.buffer, parser.buffer_pos)
+		skip(parser)
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+			if parser.buffer[parser.buffer_pos] == '+' {
+				chomping = +1
+			} else {
+				chomping = -1
+			}
+			skip(parser)
+		}
+	}
+
+	// Eat whitespaces and comments to the end of the line.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+	if parser.buffer[parser.buffer_pos] == '#' {
+		if !yaml_parser_scan_line_comment(parser, start_mark) {
+			return false
+		}
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	end_mark := parser.mark
+
+	// Set the indentation level if it was specified.
+	var indent int
+	if increment > 0 {
+		if parser.indent >= 0 {
+			indent = parser.indent + increment
+		} else {
+			indent = increment
+		}
+	}
+
+	// Scan the leading line breaks and determine the indentation level if needed.
+	var s, leading_break, trailing_breaks []byte
+	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+		return false
+	}
+
+	// Scan the block scalar content.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var leading_blank, trailing_blank bool
+	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+		// We are at the beginning of a non-empty line.
+
+		// Is it a trailing whitespace?
+		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Check if we need to fold the leading line break.
+		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+			// Do we need to join the lines by space?
+			if len(trailing_breaks) == 0 {
+				s = append(s, ' ')
+			}
+		} else {
+			s = append(s, leading_break...)
+		}
+		leading_break = leading_break[:0]
+
+		// Append the remaining line breaks.
+		s = append(s, trailing_breaks...)
+		trailing_breaks = trailing_breaks[:0]
+
+		// Is it a leading whitespace?
+		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Consume the current line.
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			s = read(parser, s)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+
+		leading_break = read_line(parser, leading_break)
+
+		// Eat the following indentation spaces and line breaks.
+		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+			return false
+		}
+	}
+
+	// Chomp the tail.
+	if chomping != -1 {
+		s = append(s, leading_break...)
+	}
+	if chomping == 1 {
+		s = append(s, trailing_breaks...)
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_LITERAL_SCALAR_STYLE,
+	}
+	if !literal {
+		token.style = yaml_FOLDED_SCALAR_STYLE
+	}
+	return true
+}
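In terms of the public API, the literal flag selects between | and > semantics, and chomping decides what happens to trailing line breaks. A short example of the resulting values:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    func main() {
        var v struct {
            Lit    string `yaml:"lit"`
            Folded string `yaml:"folded"`
            Strip  string `yaml:"strip"`
        }
        // | keeps line breaks, > folds them to spaces, |- strips the
        // trailing break (chomping == -1 above).
        doc := []byte("lit: |\n  a\n  b\nfolded: >\n  a\n  b\nstrip: |-\n  a\n")
        if err := yaml.Unmarshal(doc, &v); err != nil {
            panic(err)
        }
        fmt.Printf("%q %q %q\n", v.Lit, v.Folded, v.Strip) // "a\nb\n" "a b\n" "a"
    }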
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+	*end_mark = parser.mark
+
+	// Eat the indentation spaces and line breaks.
+	max_indent := 0
+	for {
+		// Eat the indentation spaces.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Join the whitespaces or fold line breaks.
+		if leading_blanks {
+			// Do we need to fold line breaks?
+			if len(leading_break) > 0 && leading_break[0] == '\n' {
+				if len(trailing_breaks) == 0 {
+					s = append(s, ' ')
+				} else {
+					s = append(s, trailing_breaks...)
+				}
+			} else {
+				s = append(s, leading_break...)
+				s = append(s, trailing_breaks...)
+			}
+			trailing_breaks = trailing_breaks[:0]
+			leading_break = leading_break[:0]
+		} else {
+			s = append(s, whitespaces...)
+			whitespaces = whitespaces[:0]
+		}
+	}
+
+	// Eat the right quote.
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
+	}
+	if !single {
+		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	return true
+}
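Single-quoted scalars only know the doubled-quote escape, while double-quoted scalars get the full escape table above, with \x, \u and \U code points re-encoded as UTF-8. For instance:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    func main() {
        var single, double string
        // '' collapses to a single quote; no other escapes exist here.
        if err := yaml.Unmarshal([]byte(`'it''s'`), &single); err != nil {
            panic(err)
        }
        // \t and \u00E9 run through the escape switch and UTF-8 encoder.
        if err := yaml.Unmarshal([]byte(`"tab:\t e:\u00E9"`), &double); err != nil {
            panic(err)
        }
        fmt.Printf("%q %q\n", single, double) // "it's" "tab:\t e:é"
    }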
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	var leading_blanks bool
+	var indent = parser.indent + 1
+
+	start_mark := parser.mark
+	end_mark := parser.mark
+
+	// Consume the content of the plain scalar.
+	for {
+		// Check for a document indicator.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			break
+		}
+
+		// Check for a comment.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			break
+		}
+
+		// Consume non-blank characters.
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+			// Check for indicators that may end a plain scalar.
+			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+				(parser.flow_level > 0 &&
+					(parser.buffer[parser.buffer_pos] == ',' ||
+						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+						parser.buffer[parser.buffer_pos] == '}')) {
+				break
+			}
+
+			// Check if we need to join whitespaces and breaks.
+			if leading_blanks || len(whitespaces) > 0 {
+				if leading_blanks {
+					// Do we need to fold line breaks?
+					if leading_break[0] == '\n' {
+						if len(trailing_breaks) == 0 {
+							s = append(s, ' ')
+						} else {
+							s = append(s, trailing_breaks...)
+						}
+					} else {
+						s = append(s, leading_break...)
+						s = append(s, trailing_breaks...)
+					}
+					trailing_breaks = trailing_breaks[:0]
+					leading_break = leading_break[:0]
+					leading_blanks = false
+				} else {
+					s = append(s, whitespaces...)
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for tab characters that abuse indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
+	if leading_blanks {
+		parser.simple_key_allowed = true
+	}
+	return true
+}
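The indicator check above is why a plain scalar may contain ':' mid-token but ends at ': ' and, in flow context, also at ',' and brackets. A brief illustration:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    func main() {
        var m map[string]string
        // "http://x" survives: its colons are not followed by a blank,
        // so the plain scalar only ends at the line break.
        if err := yaml.Unmarshal([]byte("url: http://x\n"), &m); err != nil {
            panic(err)
        }
        fmt.Println(m["url"]) // http://x

        var seq []string
        // In flow context the same scanner also stops at ','.
        if err := yaml.Unmarshal([]byte("[a b, c]"), &seq); err != nil {
            panic(err)
        }
        fmt.Printf("%q\n", seq) // ["a b" "c"]
    }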
+
+func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool {
+	if parser.newlines > 0 {
+		return true
+	}
+
+	var start_mark yaml_mark_t
+	var text []byte
+
+	for peek := 0; peek < 512; peek++ {
+		if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+			break
+		}
+		if is_blank(parser.buffer, parser.buffer_pos+peek) {
+			continue
+		}
+		if parser.buffer[parser.buffer_pos+peek] == '#' {
+			seen := parser.mark.index+peek
+			for {
+				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+					return false
+				}
+				if is_breakz(parser.buffer, parser.buffer_pos) {
+					if parser.mark.index >= seen {
+						break
+					}
+					if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+						return false
+					}
+					skip_line(parser)
+				} else if parser.mark.index >= seen {
+					if len(text) == 0 {
+						start_mark = parser.mark
+					}
+					text = read(parser, text)
+				} else {
+					skip(parser)
+				}
+			}
+		}
+		break
+	}
+	if len(text) > 0 {
+		parser.comments = append(parser.comments, yaml_comment_t{
+			token_mark: token_mark,
+			start_mark: start_mark,
+			line:       text,
+		})
+	}
+	return true
+}
+
+func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool {
+	token := parser.tokens[len(parser.tokens)-1]
+
+	if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 {
+		token = parser.tokens[len(parser.tokens)-2]
+	}
+
+	var token_mark = token.start_mark
+	var start_mark yaml_mark_t
+	var next_indent = parser.indent
+	if next_indent < 0 {
+		next_indent = 0
+	}
+
+	var recent_empty = false
+	var first_empty = parser.newlines <= 1
+
+	var line = parser.mark.line
+	var column = parser.mark.column
+
+	var text []byte
+
+	// The foot line is the place where a comment must start to
+	// still be considered as a foot of the prior content.
+	// If there's some content in the currently parsed line, then
+	// the foot is the line below it.
+	var foot_line = -1
+	if scan_mark.line > 0 {
+		foot_line = parser.mark.line-parser.newlines+1
+		if parser.newlines == 0 && parser.mark.column > 1 {
+			foot_line++
+		}
+	}
+
+	var peek = 0
+	for ; peek < 512; peek++ {
+		if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+			break
+		}
+		column++
+		if is_blank(parser.buffer, parser.buffer_pos+peek) {
+			continue
+		}
+		c := parser.buffer[parser.buffer_pos+peek]
+		var close_flow = parser.flow_level > 0 && (c == ']' || c == '}')
+		if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) {
+			// Got line break or terminator.
+			if close_flow || !recent_empty {
+				if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) {
+					// This is the first empty line and there were no empty lines before,
+					// so this initial part of the comment is a foot of the prior token
+					// instead of being a head for the following one. Split it up.
+					// Alternatively, this might also be the last comment inside a flow
+					// scope, so it must be a footer.
+					if len(text) > 0 {
+						if start_mark.column-1 < next_indent {
+							// If dedented it's unrelated to the prior token.
+							token_mark = start_mark
+						}
+						parser.comments = append(parser.comments, yaml_comment_t{
+							scan_mark:  scan_mark,
+							token_mark: token_mark,
+							start_mark: start_mark,
+							end_mark:   yaml_mark_t{parser.mark.index + peek, line, column},
+							foot:       text,
+						})
+						scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+						token_mark = scan_mark
+						text = nil
+					}
+				} else {
+					if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 {
+						text = append(text, '\n')
+					}
+				}
+			}
+			if !is_break(parser.buffer, parser.buffer_pos+peek) {
+				break
+			}
+			first_empty = false
+			recent_empty = true
+			column = 0
+			line++
+			continue
+		}
+
+		if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) {
+			// The comment at the different indentation is a foot of the
+			// preceding data rather than a head of the upcoming one.
+			parser.comments = append(parser.comments, yaml_comment_t{
+				scan_mark:  scan_mark,
+				token_mark: token_mark,
+				start_mark: start_mark,
+				end_mark:   yaml_mark_t{parser.mark.index + peek, line, column},
+				foot:       text,
+			})
+			scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+			token_mark = scan_mark
+			text = nil
+		}
+
+		if parser.buffer[parser.buffer_pos+peek] != '#' {
+			break
+		}
+
+		if len(text) == 0 {
+			start_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+		} else {
+			text = append(text, '\n')
+		}
+
+		recent_empty = false
+
+		// Consume until after the consumed comment line.
+		seen := parser.mark.index+peek
+		for {
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+			if is_breakz(parser.buffer, parser.buffer_pos) {
+				if parser.mark.index >= seen {
+					break
+				}
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+				skip_line(parser)
+			} else if parser.mark.index >= seen {
+				text = read(parser, text)
+			} else {
+				skip(parser)
+			}
+		}
+
+		peek = 0
+		column = 0
+		line = parser.mark.line
+		next_indent = parser.indent
+		if next_indent < 0 {
+			next_indent = 0
+		}
+	}
+
+	if len(text) > 0 {
+		parser.comments = append(parser.comments, yaml_comment_t{
+			scan_mark:  scan_mark,
+			token_mark: start_mark,
+			start_mark: start_mark,
+			end_mark:   yaml_mark_t{parser.mark.index + peek - 1, line, column},
+			head:       text,
+		})
+	}
+	return true
+}
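These two scanners populate parser.comments with line, head, and foot text, which the higher layers attach to yaml.Node fields (HeadComment, LineComment, FootComment). A sketch of the visible result; the exact attachment points are a detail of the library:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    func main() {
        var n yaml.Node
        if err := yaml.Unmarshal([]byte("# head of key\nkey: value\n"), &n); err != nil {
            panic(err)
        }
        // n is the document node; its first child is the mapping,
        // whose first child is the 'key' scalar.
        key := n.Content[0].Content[0]
        fmt.Println(key.HeadComment) // the comment text, '#' included
    }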
diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go
new file mode 100644
index 000000000000..9210ece7e972
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/sorter.go
@@ -0,0 +1,134 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+	"reflect"
+	"unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int      { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+	a := l[i]
+	b := l[j]
+	ak := a.Kind()
+	bk := b.Kind()
+	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+		a = a.Elem()
+		ak = a.Kind()
+	}
+	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+		b = b.Elem()
+		bk = b.Kind()
+	}
+	af, aok := keyFloat(a)
+	bf, bok := keyFloat(b)
+	if aok && bok {
+		if af != bf {
+			return af < bf
+		}
+		if ak != bk {
+			return ak < bk
+		}
+		return numLess(a, b)
+	}
+	if ak != reflect.String || bk != reflect.String {
+		return ak < bk
+	}
+	ar, br := []rune(a.String()), []rune(b.String())
+	digits := false
+	for i := 0; i < len(ar) && i < len(br); i++ {
+		if ar[i] == br[i] {
+			digits = unicode.IsDigit(ar[i])
+			continue
+		}
+		al := unicode.IsLetter(ar[i])
+		bl := unicode.IsLetter(br[i])
+		if al && bl {
+			return ar[i] < br[i]
+		}
+		if al || bl {
+			if digits {
+				return al
+			} else {
+				return bl
+			}
+		}
+		var ai, bi int
+		var an, bn int64
+		if ar[i] == '0' || br[i] == '0' {
+			for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+				if ar[j] != '0' {
+					an = 1
+					bn = 1
+					break
+				}
+			}
+		}
+		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+			an = an*10 + int64(ar[ai]-'0')
+		}
+		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+			bn = bn*10 + int64(br[bi]-'0')
+		}
+		if an != bn {
+			return an < bn
+		}
+		if ai != bi {
+			return ai < bi
+		}
+		return ar[i] < br[i]
+	}
+	return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number/bool
+// and whether it is a number/bool or not.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return float64(v.Int()), true
+	case reflect.Float32, reflect.Float64:
+		return v.Float(), true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return float64(v.Uint()), true
+	case reflect.Bool:
+		if v.Bool() {
+			return 1, true
+		}
+		return 0, true
+	}
+	return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must necessarily have the same kind.
+func numLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return a.Int() < b.Int()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	}
+	panic("not a number")
+}
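This comparator is what gives Marshal its deterministic map-key order: numeric keys sort numerically, and digit runs inside strings compare by value, so "a2" precedes "a10". For example:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    func main() {
        out, err := yaml.Marshal(map[string]int{"a10": 1, "a2": 2, "b": 3})
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
        // a2: 2
        // a10: 1
        // b: 3
    }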
diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go
new file mode 100644
index 000000000000..b8a116bf9a22
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/writerc.go
@@ -0,0 +1,48 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_WRITER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	return true
+}
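yaml_emitter_flush is what Encoder.Close ultimately drives: encoded bytes sit in emitter.buffer until the write handler pushes them to the io.Writer, which is why an Encoder must be closed. A sketch:

    package main

    import (
        "bytes"
        "fmt"

        "gopkg.in/yaml.v3"
    )

    func main() {
        var buf bytes.Buffer
        enc := yaml.NewEncoder(&buf)
        if err := enc.Encode(map[string]int{"x": 1}); err != nil {
            panic(err)
        }
        // Close flushes the emitter buffer through the write handler.
        if err := enc.Close(); err != nil {
            panic(err)
        }
        fmt.Print(buf.String()) // x: 1
    }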
diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go
new file mode 100644
index 000000000000..8cec6da48d3e
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yaml.go
@@ -0,0 +1,698 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//     https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+	UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	parser      *parser
+	knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+	dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	d.knownFields = dec.knownFields
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
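KnownFields is the strict-decoding switch: with it enabled, a mapping key that has no corresponding struct field produces a *yaml.TypeError instead of being silently dropped. For example:

    package main

    import (
        "fmt"
        "strings"

        "gopkg.in/yaml.v3"
    )

    func main() {
        type Config struct {
            Addr string `yaml:"addr"`
        }
        dec := yaml.NewDecoder(strings.NewReader("addr: localhost:8080\ntypo_field: 1\n"))
        dec.KnownFields(true)
        var c Config
        err := dec.Decode(&c)
        fmt.Println(err) // reports that typo_field is not found in main.Config
    }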
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	defer handleErr(&err)
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(n, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be excluded if IsZero returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
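The flags documented above combine, comma-separated, within a single tag. A brief example pairing omitempty with flow:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    type T struct {
        Name string   `yaml:"name"`
        Tags []string `yaml:"tags,omitempty,flow"`
    }

    func main() {
        a, _ := yaml.Marshal(T{Name: "x", Tags: []string{"a", "b"}})
        b, _ := yaml.Marshal(T{Name: "y"})
        fmt.Print(string(a)) // name: x
        //                      tags: [a, b]
        fmt.Print(string(b)) // name: y   (tags omitted)
    }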
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(v))
+	e.finish()
+	p := newParser(e.out)
+	p.textless = true
+	defer p.destroy()
+	doc := p.parse()
+	*n = *doc.Content[0]
+	return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+	if spaces < 0 {
+		panic("yaml: cannot indent to a negative number of spaces")
+	}
+	e.encoder.indent = spaces
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
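// A minimal streaming sketch showing SetIndent and the "---" separator
// between documents (bytes.Buffer is from the standard library; the
// values are illustrative):
//
//	var buf bytes.Buffer
//	enc := yaml.NewEncoder(&buf)
//	enc.SetIndent(2)
//	_ = enc.Encode(map[string]int{"a": 1})
//	_ = enc.Encode(map[string]int{"b": 2})
//	_ = enc.Close()
//	// buf.String() == "a: 1\n---\nb: 2\n"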
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
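// A sketch of the partial-decoding behaviour described above (errors.As is
// from the standard library; the input and field names are illustrative):
//
//	var v struct {
//		N int
//		S string
//	}
//	err := yaml.Unmarshal([]byte("n: nope\ns: ok"), &v)
//	var terr *yaml.TypeError
//	if errors.As(err, &terr) {
//		// terr.Errors describes the field that failed to decode;
//		// v.S == "ok" was still decoded despite the error.
//	}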
+type Kind uint32
+
+const (
+	DocumentNode Kind = 1 << iota
+	SequenceNode
+	MappingNode
+	ScalarNode
+	AliasNode
+)
+
+type Style uint32
+
+const (
+	TaggedStyle Style = 1 << iota
+	DoubleQuotedStyle
+	SingleQuotedStyle
+	LiteralStyle
+	FoldedStyle
+	FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+//     var person struct {
+//         Name    string
+//         Address yaml.Node
+//     }
+//     err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+//     var person Node
+//     err := yaml.Unmarshal(data, &person)
+//
+type Node struct {
+	// Kind defines whether the node is a document, a mapping, a sequence,
+	// a scalar value, or an alias to another node. The specific data type of
+	// scalar nodes may be obtained via the ShortTag and LongTag methods.
+	Kind Kind
+
+	// Style allows customizing the appearance of the node in the tree.
+	Style Style
+
+	// Tag holds the YAML tag defining the data type for the value.
+	// When decoding, this field will always be set to the resolved tag,
+	// even when it wasn't explicitly provided in the YAML content.
+	// When encoding, if this field is unset the value type will be
+	// implied from the node properties, and if it is set, it will only
+	// be serialized into the representation if TaggedStyle is used or
+	// the implicit tag diverges from the provided one.
+	Tag string
+
+	// Value holds the unescaped and unquoted representation of the value.
+	Value string
+
+	// Anchor holds the anchor name for this node, which allows aliases to point to it.
+	Anchor string
+
+	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+	Alias *Node
+
+	// Content holds contained nodes for documents, mappings, and sequences.
+	Content []*Node
+
+	// HeadComment holds any comments in the lines preceding the node and
+	// not separated by an empty line.
+	HeadComment string
+
+	// LineComment holds any comments at the end of the line where the node is in.
+	LineComment string
+
+	// FootComment holds any comments following the node and before empty lines.
+	FootComment string
+
+	// Line and Column hold the node position in the decoded YAML text.
+	// These fields are not respected when encoding the node.
+	Line   int
+	Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+	return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+	return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+	if n.indicatedString() {
+		return strTag
+	}
+	if n.Tag == "" || n.Tag == "!" {
+		switch n.Kind {
+		case MappingNode:
+			return mapTag
+		case SequenceNode:
+			return seqTag
+		case AliasNode:
+			if n.Alias != nil {
+				return n.Alias.ShortTag()
+			}
+		case ScalarNode:
+			tag, _ := resolve("", n.Value)
+			return tag
+		case 0:
+			// Special case to make the zero value convenient.
+			if n.IsZero() {
+				return nullTag
+			}
+		}
+		return ""
+	}
+	return shortTag(n.Tag)
+}
+
+func (n *Node) indicatedString() bool {
+	return n.Kind == ScalarNode &&
+		(shortTag(n.Tag) == strTag ||
+			(n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
+func (n *Node) SetString(s string) {
+	n.Kind = ScalarNode
+	if utf8.ValidString(s) {
+		n.Value = s
+		n.Tag = strTag
+	} else {
+		n.Value = encodeBase64(s)
+		n.Tag = binaryTag
+	}
+	if strings.Contains(n.Value, "\n") {
+		n.Style = LiteralStyle
+	}
+}
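// A sketch of building a document programmatically with Node and SetString
// (the key and value are illustrative):
//
//	var k, v yaml.Node
//	k.SetString("greeting")
//	v.SetString("hello\nworld") // multiline, so SetString picks LiteralStyle
//	doc := &yaml.Node{
//		Kind:    yaml.MappingNode,
//		Content: []*yaml.Node{&k, &v},
//	}
//	out, _ := yaml.Marshal(doc)
//	// greeting: |-
//	//     hello
//	//     world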
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+
+	// InlineUnmarshalers holds indexes to inlined fields that
+	// contain unmarshaler values.
+	InlineUnmarshalers [][]int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
+	Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+var unmarshalerType reflect.Type
+
+func init() {
+	var v Unmarshaler
+	unmarshalerType = reflect.ValueOf(&v).Elem().Type()
+}
+
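// The cache guarded by fieldMapMutex below follows the usual read-mostly
// pattern: read lock on the fast path, recompute on a miss, then publish
// under the write lock. A generic sketch of the same shape (compute is a
// placeholder, not a function in this package):
//
//	func lookup(t reflect.Type) *structInfo {
//		fieldMapMutex.RLock()
//		si, ok := structMap[t]
//		fieldMapMutex.RUnlock()
//		if ok {
//			return si
//		}
//		si = compute(t) // duplicated work is possible; last writer wins
//		fieldMapMutex.Lock()
//		structMap[t] = si
//		fieldMapMutex.Unlock()
//		return si
//	}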
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	fieldMapMutex.RLock()
+	sinfo, found := structMap[st]
+	fieldMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	inlineUnmarshalers := [][]int(nil)
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("yaml")
+		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "flow":
+					info.Flow = true
+				case "inline":
+					inline = true
+				default:
+					return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st))
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct, reflect.Ptr:
+				ftype := field.Type
+				for ftype.Kind() == reflect.Ptr {
+					ftype = ftype.Elem()
+				}
+				if ftype.Kind() != reflect.Struct {
+					return nil, errors.New("option ,inline may only be used on a struct or map field")
+				}
+				if reflect.PtrTo(ftype).Implements(unmarshalerType) {
+					inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
+				} else {
+					sinfo, err := getStructInfo(ftype)
+					if err != nil {
+						return nil, err
+					}
+					for _, index := range sinfo.InlineUnmarshalers {
+						inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
+					}
+					for _, finfo := range sinfo.FieldsList {
+						if _, found := fieldsMap[finfo.Key]; found {
+							msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
+							return nil, errors.New(msg)
+						}
+						if finfo.Inline == nil {
+							finfo.Inline = []int{i, finfo.Num}
+						} else {
+							finfo.Inline = append([]int{i}, finfo.Inline...)
+						}
+						finfo.Id = len(fieldsList)
+						fieldsMap[finfo.Key] = finfo
+						fieldsList = append(fieldsList, finfo)
+					}
+				}
+			default:
+				return nil, errors.New("option ,inline may only be used on a struct or map field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		info.Id = len(fieldsList)
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+
+	sinfo = &structInfo{
+		FieldsMap:          fieldsMap,
+		FieldsList:         fieldsList,
+		InlineMap:          inlineMap,
+		InlineUnmarshalers: inlineUnmarshalers,
+	}
+
+	fieldMapMutex.Lock()
+	structMap[st] = sinfo
+	fieldMapMutex.Unlock()
+	return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+	IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
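// A sketch of IsZeroer interacting with omitempty (time.Time is the
// standard-library example named above; the struct is illustrative):
//
//	type Window struct {
//		Since time.Time `yaml:"since,omitempty"`
//	}
//	out, _ := yaml.Marshal(Window{})
//	// "{}\n": Since is omitted because time.Time implements IsZero,
//	// even though its own fields are unexported.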
diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go
new file mode 100644
index 000000000000..7c6d00770619
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlh.go
@@ -0,0 +1,807 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"fmt"
+	"io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
+
+	yaml_PLAIN_SCALAR_STYLE         yaml_scalar_style_t = 1 << iota // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE                                 // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE                                 // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE                                       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE                                        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT   // A STREAM-START event.
+	yaml_STREAM_END_EVENT     // A STREAM-END event.
+	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
+	yaml_ALIAS_EVENT          // An ALIAS event.
+	yaml_SCALAR_EVENT         // A SCALAR event.
+	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
+	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
+	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+	yaml_TAIL_COMMENT_EVENT
+)
+
+var eventStrings = []string{
+	yaml_NO_EVENT:             "none",
+	yaml_STREAM_START_EVENT:   "stream start",
+	yaml_STREAM_END_EVENT:     "stream end",
+	yaml_DOCUMENT_START_EVENT: "document start",
+	yaml_DOCUMENT_END_EVENT:   "document end",
+	yaml_ALIAS_EVENT:          "alias",
+	yaml_SCALAR_EVENT:         "scalar",
+	yaml_SEQUENCE_START_EVENT: "sequence start",
+	yaml_SEQUENCE_END_EVENT:   "sequence end",
+	yaml_MAPPING_START_EVENT:  "mapping start",
+	yaml_MAPPING_END_EVENT:    "mapping end",
+	yaml_TAIL_COMMENT_EVENT:   "tail comment",
+}
+
+func (e yaml_event_type_t) String() string {
+	if e < 0 || int(e) >= len(eventStrings) {
+		return fmt.Sprintf("unknown event %d", e)
+	}
+	return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+	// The event type.
+	typ yaml_event_type_t
+
+	// The start and end of the event.
+	start_mark, end_mark yaml_mark_t
+
+	// The document encoding (for yaml_STREAM_START_EVENT).
+	encoding yaml_encoding_t
+
+	// The version directive (for yaml_DOCUMENT_START_EVENT).
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+	tag_directives []yaml_tag_directive_t
+
+	// The comments
+	head_comment []byte
+	line_comment []byte
+	foot_comment []byte
+	tail_comment []byte
+
+	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+	anchor []byte
+
+	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	tag []byte
+
+	// The scalar value (for yaml_SCALAR_EVENT).
+	value []byte
+
+	// Is the document start/end indicator implicit, or the tag optional?
+	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+	implicit bool
+
+	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+	quoted_implicit bool
+
+	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
+	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
+	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
+	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
+	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
+	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+	// Not in original libyaml.
+	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
+
+	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
+	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+	// An empty node.
+	yaml_NO_NODE yaml_node_type_t = iota
+
+	yaml_SCALAR_NODE   // A scalar node.
+	yaml_SEQUENCE_NODE // A sequence node.
+	yaml_MAPPING_NODE  // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+	key   int // The key of the element.
+	value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+	typ yaml_node_type_t // The node type.
+	tag []byte           // The node tag.
+
+	// The node data.
+
+	// The scalar parameters (for yaml_SCALAR_NODE).
+	scalar struct {
+		value  []byte              // The scalar value.
+		length int                 // The length of the scalar value.
+		style  yaml_scalar_style_t // The scalar style.
+	}
+
+	// The sequence parameters (for YAML_SEQUENCE_NODE).
+	sequence struct {
+		items_data []yaml_node_item_t    // The stack of sequence items.
+		style      yaml_sequence_style_t // The sequence style.
+	}
+
+	// The mapping parameters (for yaml_MAPPING_NODE).
+	mapping struct {
+		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
+		pairs_start *yaml_node_pair_t    // The beginning of the stack.
+		pairs_end   *yaml_node_pair_t    // The end of the stack.
+		pairs_top   *yaml_node_pair_t    // The top of the stack.
+		style       yaml_mapping_style_t // The mapping style.
+	}
+
+	start_mark yaml_mark_t // The beginning of the node.
+	end_mark   yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data  []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end   int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit   int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write not more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out] data        A pointer to an application data specified by
+//                      yaml_parser_set_input().
+// [out]    buffer      The buffer to write the data from the source.
+// [in]     size        The size of the buffer.
+// [out]    size_read   The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
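// The package wires a handler of this shape up internally when reading from
// an io.Reader; a sketch of what such a handler looks like (readerHandler
// is illustrative, not a function defined in this file):
//
//	func readerHandler(r io.Reader) yaml_read_handler_t {
//		return func(parser *yaml_parser_t, buffer []byte) (n int, err error) {
//			return r.Read(buffer) // io.EOF marks the end of input
//		}
//	}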
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_reader io.Reader // File input data.
+	input        []byte    // String input data.
+	input_pos    int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	newlines int // The number of line breaks since last non-break/non-blank character
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The input encoding.
+
+	offset int         // The offset of the current position (in bytes).
+	mark   yaml_mark_t // The mark of the current position.
+
+	// Comments
+
+	head_comment []byte // The current head comments
+	line_comment []byte // The current line comments
+	foot_comment []byte // The current foot comments
+	tail_comment []byte // Foot comment that happens at the end of a block.
+	stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
+
+	comments      []yaml_comment_t // The folded comments for all parsed tokens
+	comments_head int
+
+	// Scanner stuff
+
+	stream_start_produced bool // Have we started to scan the input stream?
+	stream_end_produced   bool // Have we reached the end of the input stream?
+
+	flow_level int // The number of unclosed '[' and '{' indicators.
+
+	tokens          []yaml_token_t // The tokens queue.
+	tokens_head     int            // The head of the tokens queue.
+	tokens_parsed   int            // The number of tokens fetched from the queue.
+	token_available bool           // Does the tokens queue contain a token ready for dequeueing.
+
+	indent  int   // The current indentation level.
+	indents []int // The indentation levels stack.
+
+	simple_key_allowed bool                // May a simple key occur at the current position?
+	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+	simple_keys_by_tok map[int]int         // possible simple_key indexes indexed by token_number
+
+	// Parser stuff
+
+	state          yaml_parser_state_t    // The current parser state.
+	states         []yaml_parser_state_t  // The parser states stack.
+	marks          []yaml_mark_t          // The stack of marks.
+	tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+	// Dumper stuff
+
+	aliases []yaml_alias_data_t // The alias data.
+
+	document *yaml_document_t // The currently parsed document.
+}
+
+type yaml_comment_t struct {
+
+	scan_mark  yaml_mark_t // Position where scanning for comments started
+	token_mark yaml_mark_t // Position after which tokens will be associated with this comment
+	start_mark yaml_mark_t // Position of '#' comment mark
+	end_mark   yaml_mark_t // Position where comment terminated
+
+	head []byte
+	line []byte
+	foot []byte
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write @a size bytes of the
+// @a buffer to the output.
+//
+// @param[in,out] data   A pointer to an application data specified by
+//                       yaml_emitter_set_output().
+// @param[in]     buffer The buffer with bytes to be written.
+// @param[in]     size   The size of the buffer.
+//
+// @returns On success, the handler should return @c 1. If the handler failed,
+// the returned value should be @c 0.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+	// Expect STREAM-START.
+	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
+	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
+	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE   // Expect the next item of a flow sequence, with the comma already written out
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE     // Expect the next key of a flow mapping, with the comma already written out
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_writer io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // If the output is in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	space_above bool // Is there an empty line above?
+	foot_indent int  // The indent used to write the foot comment above, or -1 if none.
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value     []byte // The scalar value.
+		multiline bool   // Does the scalar contain line breaks?
+		flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Comments
+	head_comment []byte
+	line_comment []byte
+	foot_comment []byte
+	tail_comment []byte
+
+	key_line_comment []byte
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
new file mode 100644
index 000000000000..e88f9c54aecb
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+	return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+	return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+	//return is_space(b, i) || is_tab(b, i)
+	return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+	return (b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+	return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+	//return is_break(b, i) || is_z(b, i)
+	return (
+	// is_break:
+	b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		// is_z:
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+	//return is_space(b, i) || is_breakz(b, i)
+	return (
+	// is_space:
+	b[i] == ' ' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+	//return is_blank(b, i) || is_breakz(b, i)
+	return (
+	// is_blank:
+	b[i] == ' ' || b[i] == '\t' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+	// Don't replace these by a switch without first
+	// confirming that it is being inlined.
+	if b&0x80 == 0x00 {
+		return 1
+	}
+	if b&0xE0 == 0xC0 {
+		return 2
+	}
+	if b&0xF0 == 0xE0 {
+		return 3
+	}
+	if b&0xF8 == 0xF0 {
+		return 4
+	}
+	return 0
+
+}
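// A sketch of how width() is meant to be used: stepping through a UTF-8
// buffer one encoded rune at a time (b stands for an arbitrary []byte):
//
//	for i := 0; i < len(b); {
//		w := width(b[i])
//		if w == 0 {
//			break // not a valid UTF-8 leading byte
//		}
//		// b[i : i+w] holds one encoded rune
//		i += w
//	}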
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 4e0448570ce9..577e9de880c6 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -33,12 +33,20 @@ github.com/containerd/log
 # github.com/containerd/platforms v0.2.0
 ## explicit; go 1.20
 github.com/containerd/platforms
+# github.com/cpuguy83/go-md2man/v2 v2.0.3
+## explicit; go 1.11
+github.com/cpuguy83/go-md2man/v2
+github.com/cpuguy83/go-md2man/v2/md2man
 # github.com/creack/pty v1.1.21
 ## explicit; go 1.13
 github.com/creack/pty
 # github.com/distribution/reference v0.5.0
 ## explicit; go 1.20
 github.com/distribution/reference
+# github.com/docker/cli-docs-tool v0.6.0
+## explicit; go 1.18
+github.com/docker/cli-docs-tool
+github.com/docker/cli-docs-tool/annotation
 # github.com/docker/distribution v2.8.3+incompatible
 ## explicit
 github.com/docker/distribution
@@ -252,12 +260,16 @@ github.com/prometheus/procfs/internal/util
 # github.com/rivo/uniseg v0.2.0
 ## explicit; go 1.12
 github.com/rivo/uniseg
+# github.com/russross/blackfriday/v2 v2.1.0
+## explicit
+github.com/russross/blackfriday/v2
 # github.com/sirupsen/logrus v1.9.3
 ## explicit; go 1.13
 github.com/sirupsen/logrus
 # github.com/spf13/cobra v1.8.0
 ## explicit; go 1.15
 github.com/spf13/cobra
+github.com/spf13/cobra/doc
 # github.com/spf13/pflag v1.0.5
 ## explicit; go 1.12
 github.com/spf13/pflag
@@ -498,6 +510,9 @@ google.golang.org/protobuf/types/known/wrapperspb
 # gopkg.in/yaml.v2 v2.4.0
 ## explicit; go 1.15
 gopkg.in/yaml.v2
+# gopkg.in/yaml.v3 v3.0.1
+## explicit
+gopkg.in/yaml.v3
 # gotest.tools/v3 v3.5.1
 ## explicit; go 1.17
 gotest.tools/v3/assert
--
2.45.2