Update Docker and containerd. OBS-URL: https://build.opensuse.org/request/show/1120879 OBS-URL: https://build.opensuse.org/package/show/Virtualization:containers/docker?expand=0&rev=397
31968 lines
1.1 MiB
31968 lines
1.1 MiB
From 8d493d2edca8a36e0957d6d5566a0fbc02261e69 Mon Sep 17 00:00:00 2001
|
||
From: Aleksa Sarai <asarai@suse.de>
|
||
Date: Wed, 26 Apr 2023 10:13:48 +1000
|
||
Subject: [PATCH] docs: include required tools in source tree
|
||
|
||
In order to be able to build the documentation without internet access
|
||
(as is required by some distribution build systems), all of the source
|
||
code needed for the build needs to be available in the source tarball.
|
||
|
||
This used to be possible with the docker-cli sources but was
|
||
accidentally broken with some CI changes that switched to downloading
|
||
the tools (by modifying go.mod as part of the docs build script).
|
||
|
||
This pattern also made documentation builds less reproducible since the
|
||
tool version used was not based on the source code version.
|
||
|
||
Fixes: commit 7dc35c03fca5 ("validate manpages target")
|
||
Fixes: commit a650f4ddd008 ("switch to cli-docs-tool for yaml docs generation")
|
||
Signed-off-by: Aleksa Sarai <asarai@suse.de>
|
||
---
|
||
docs/generate/go.mod | 13 -
|
||
docs/generate/tools.go | 8 -
|
||
import.go | 17 +
|
||
man/tools.go | 11 -
|
||
scripts/docs/generate-man.sh | 33 +-
|
||
scripts/docs/generate-md.sh | 27 +-
|
||
scripts/docs/generate-yaml.sh | 29 +-
|
||
vendor.mod | 13 +-
|
||
vendor.sum | 46 +-
|
||
vendor/github.com/cespare/xxhash/v2/README.md | 31 +-
|
||
.../github.com/cespare/xxhash/v2/testall.sh | 10 +
|
||
vendor/github.com/cespare/xxhash/v2/xxhash.go | 47 +-
|
||
.../cespare/xxhash/v2/xxhash_amd64.s | 336 +-
|
||
.../cespare/xxhash/v2/xxhash_arm64.s | 183 +
|
||
.../v2/{xxhash_amd64.go => xxhash_asm.go} | 2 +
|
||
.../cespare/xxhash/v2/xxhash_other.go | 22 +-
|
||
.../cespare/xxhash/v2/xxhash_safe.go | 1 +
|
||
.../cespare/xxhash/v2/xxhash_unsafe.go | 3 +-
|
||
.../cpuguy83/go-md2man/v2/.gitignore | 2 +
|
||
.../cpuguy83/go-md2man/v2/.golangci.yml | 6 +
|
||
.../cpuguy83/go-md2man/v2/Dockerfile | 20 +
|
||
.../cpuguy83/go-md2man/v2/LICENSE.md | 21 +
|
||
.../github.com/cpuguy83/go-md2man/v2/Makefile | 35 +
|
||
.../cpuguy83/go-md2man/v2/README.md | 15 +
|
||
.../cpuguy83/go-md2man/v2/go-md2man.1.md | 28 +
|
||
.../cpuguy83/go-md2man/v2/md2man.go | 53 +
|
||
.../cpuguy83/go-md2man/v2/md2man/md2man.go | 16 +
|
||
.../cpuguy83/go-md2man/v2/md2man/roff.go | 348 ++
|
||
.../docker/cli-docs-tool/.dockerignore | 2 +
|
||
.../docker/cli-docs-tool/.gitignore | 2 +
|
||
.../docker/cli-docs-tool/.golangci.yml | 37 +
|
||
.../docker/cli-docs-tool/Dockerfile | 86 +
|
||
.../github.com/docker/cli-docs-tool/LICENSE | 202 ++
|
||
.../github.com/docker/cli-docs-tool/README.md | 67 +
|
||
.../cli-docs-tool/annotation/annotation.go | 25 +
|
||
.../docker/cli-docs-tool/clidocstool.go | 123 +
|
||
.../docker/cli-docs-tool/clidocstool_md.go | 280 ++
|
||
.../docker/cli-docs-tool/clidocstool_yaml.go | 435 +++
|
||
.../docker/cli-docs-tool/docker-bake.hcl | 51 +
|
||
.../docker/cli-docs-tool/markdown.go | 87 +
|
||
.../russross/blackfriday/v2/.gitignore | 8 +
|
||
.../russross/blackfriday/v2/.travis.yml | 17 +
|
||
.../russross/blackfriday/v2/LICENSE.txt | 29 +
|
||
.../russross/blackfriday/v2/README.md | 335 ++
|
||
.../russross/blackfriday/v2/block.go | 1612 +++++++++
|
||
.../github.com/russross/blackfriday/v2/doc.go | 46 +
|
||
.../russross/blackfriday/v2/entities.go | 2236 ++++++++++++
|
||
.../github.com/russross/blackfriday/v2/esc.go | 70 +
|
||
.../russross/blackfriday/v2/html.go | 952 ++++++
|
||
.../russross/blackfriday/v2/inline.go | 1228 +++++++
|
||
.../russross/blackfriday/v2/markdown.go | 950 ++++++
|
||
.../russross/blackfriday/v2/node.go | 360 ++
|
||
.../russross/blackfriday/v2/smartypants.go | 457 +++
|
||
vendor/github.com/spf13/cobra/doc/README.md | 17 +
|
||
vendor/github.com/spf13/cobra/doc/man_docs.go | 246 ++
|
||
vendor/github.com/spf13/cobra/doc/man_docs.md | 31 +
|
||
vendor/github.com/spf13/cobra/doc/md_docs.go | 156 +
|
||
vendor/github.com/spf13/cobra/doc/md_docs.md | 115 +
|
||
.../github.com/spf13/cobra/doc/rest_docs.go | 186 +
|
||
.../github.com/spf13/cobra/doc/rest_docs.md | 114 +
|
||
vendor/github.com/spf13/cobra/doc/util.go | 52 +
|
||
.../github.com/spf13/cobra/doc/yaml_docs.go | 175 +
|
||
.../github.com/spf13/cobra/doc/yaml_docs.md | 112 +
|
||
.../googleapis/rpc/status/status.pb.go | 10 +-
|
||
.../grpc/attributes/attributes.go | 2 +-
|
||
vendor/google.golang.org/grpc/backoff.go | 2 +-
|
||
.../grpc/balancer/balancer.go | 31 +
|
||
.../grpc/balancer/base/balancer.go | 4 +-
|
||
.../grpc/balancer/conn_state_evaluator.go | 12 +-
|
||
.../grpc/balancer_conn_wrappers.go | 71 +-
|
||
.../grpc_binarylog_v1/binarylog.pb.go | 20 +-
|
||
.../grpc/channelz/channelz.go | 2 +-
|
||
vendor/google.golang.org/grpc/clientconn.go | 173 +-
|
||
.../grpc/credentials/credentials.go | 20 +-
|
||
.../google.golang.org/grpc/credentials/tls.go | 6 +-
|
||
vendor/google.golang.org/grpc/dialoptions.go | 25 +-
|
||
.../grpc/encoding/encoding.go | 7 +-
|
||
.../grpc/grpclog/loggerv2.go | 9 +-
|
||
.../grpc/internal/binarylog/env_config.go | 18 +-
|
||
.../grpc/internal/binarylog/method_logger.go | 128 +-
|
||
.../grpc/internal/binarylog/sink.go | 12 +-
|
||
.../grpc/internal/channelz/types.go | 16 +-
|
||
.../grpc/internal/envconfig/envconfig.go | 39 +-
|
||
.../grpc/internal/envconfig/xds.go | 31 +-
|
||
.../grpc/internal/grpclog/grpclog.go | 2 +-
|
||
.../grpc/internal/grpcsync/oncefunc.go | 32 +
|
||
.../grpc/internal/grpcutil/compressor.go | 47 +
|
||
.../grpc/internal/grpcutil/method.go | 1 -
|
||
.../grpc/internal/internal.go | 3 +
|
||
.../internal/resolver/dns/dns_resolver.go | 6 +-
|
||
.../resolver/passthrough/passthrough.go | 11 +-
|
||
.../grpc/internal/resolver/unix/unix.go | 4 +-
|
||
.../internal/serviceconfig/serviceconfig.go | 8 +-
|
||
.../grpc/internal/status/status.go | 10 +
|
||
.../grpc/internal/transport/controlbuf.go | 62 +-
|
||
.../grpc/internal/transport/defaults.go | 6 +
|
||
.../grpc/internal/transport/handler_server.go | 53 +-
|
||
.../grpc/internal/transport/http2_client.go | 261 +-
|
||
.../grpc/internal/transport/http2_server.go | 142 +-
|
||
.../grpc/internal/transport/transport.go | 16 +-
|
||
.../grpc/metadata/metadata.go | 20 +-
|
||
.../google.golang.org/grpc/picker_wrapper.go | 35 +-
|
||
vendor/google.golang.org/grpc/pickfirst.go | 6 +-
|
||
vendor/google.golang.org/grpc/preloader.go | 2 +-
|
||
vendor/google.golang.org/grpc/regenerate.sh | 7 +-
|
||
.../grpc/resolver/resolver.go | 36 +-
|
||
vendor/google.golang.org/grpc/rpc_util.go | 56 +-
|
||
vendor/google.golang.org/grpc/server.go | 79 +-
|
||
.../google.golang.org/grpc/service_config.go | 10 +-
|
||
.../grpc/serviceconfig/serviceconfig.go | 2 +-
|
||
.../google.golang.org/grpc/status/status.go | 12 +-
|
||
vendor/google.golang.org/grpc/stream.go | 62 +-
|
||
vendor/google.golang.org/grpc/tap/tap.go | 2 +-
|
||
vendor/google.golang.org/grpc/version.go | 2 +-
|
||
vendor/google.golang.org/grpc/vet.sh | 29 +-
|
||
.../protobuf/encoding/protojson/doc.go | 2 +-
|
||
.../encoding/protojson/well_known_types.go | 12 +-
|
||
.../protobuf/encoding/protowire/wire.go | 8 +-
|
||
.../protobuf/internal/encoding/json/decode.go | 2 +-
|
||
.../protobuf/internal/encoding/text/decode.go | 5 +-
|
||
.../internal/encoding/text/decode_number.go | 43 +-
|
||
.../protobuf/internal/genid/descriptor_gen.go | 90 +-
|
||
.../protobuf/internal/impl/convert.go | 1 -
|
||
.../protobuf/internal/strs/strings_unsafe.go | 2 +-
|
||
.../protobuf/internal/version/version.go | 4 +-
|
||
.../google.golang.org/protobuf/proto/doc.go | 9 +-
|
||
.../google.golang.org/protobuf/proto/equal.go | 172 +-
|
||
.../reflect/protoreflect/source_gen.go | 14 +
|
||
.../protobuf/reflect/protoreflect/value.go | 2 +-
|
||
.../reflect/protoreflect/value_equal.go | 168 +
|
||
.../reflect/protoreflect/value_union.go | 4 +-
|
||
.../reflect/protoregistry/registry.go | 2 +-
|
||
.../types/descriptorpb/descriptor.pb.go | 1547 +++++----
|
||
.../protobuf/types/known/anypb/any.pb.go | 135 +-
|
||
.../types/known/durationpb/duration.pb.go | 63 +-
|
||
.../types/known/timestamppb/timestamp.pb.go | 61 +-
|
||
vendor/gopkg.in/yaml.v3/LICENSE | 50 +
|
||
vendor/gopkg.in/yaml.v3/NOTICE | 13 +
|
||
vendor/gopkg.in/yaml.v3/README.md | 150 +
|
||
vendor/gopkg.in/yaml.v3/apic.go | 747 ++++
|
||
vendor/gopkg.in/yaml.v3/decode.go | 1000 ++++++
|
||
vendor/gopkg.in/yaml.v3/emitterc.go | 2020 +++++++++++
|
||
vendor/gopkg.in/yaml.v3/encode.go | 577 ++++
|
||
vendor/gopkg.in/yaml.v3/parserc.go | 1258 +++++++
|
||
vendor/gopkg.in/yaml.v3/readerc.go | 434 +++
|
||
vendor/gopkg.in/yaml.v3/resolve.go | 326 ++
|
||
vendor/gopkg.in/yaml.v3/scannerc.go | 3038 +++++++++++++++++
|
||
vendor/gopkg.in/yaml.v3/sorter.go | 134 +
|
||
vendor/gopkg.in/yaml.v3/writerc.go | 48 +
|
||
vendor/gopkg.in/yaml.v3/yaml.go | 698 ++++
|
||
vendor/gopkg.in/yaml.v3/yamlh.go | 807 +++++
|
||
vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 ++
|
||
vendor/modules.txt | 25 +-
|
||
153 files changed, 25896 insertions(+), 1912 deletions(-)
|
||
delete mode 100644 docs/generate/go.mod
|
||
delete mode 100644 docs/generate/tools.go
|
||
create mode 100644 import.go
|
||
delete mode 100644 man/tools.go
|
||
create mode 100644 vendor/github.com/cespare/xxhash/v2/testall.sh
|
||
create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
|
||
rename vendor/github.com/cespare/xxhash/v2/{xxhash_amd64.go => xxhash_asm.go} (73%)
|
||
create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/.gitignore
|
||
create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/.golangci.yml
|
||
create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile
|
||
create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
|
||
create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/Makefile
|
||
create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/README.md
|
||
create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md
|
||
create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man.go
|
||
create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
|
||
create mode 100644 vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
|
||
create mode 100644 vendor/github.com/docker/cli-docs-tool/.dockerignore
|
||
create mode 100644 vendor/github.com/docker/cli-docs-tool/.gitignore
|
||
create mode 100644 vendor/github.com/docker/cli-docs-tool/.golangci.yml
|
||
create mode 100644 vendor/github.com/docker/cli-docs-tool/Dockerfile
|
||
create mode 100644 vendor/github.com/docker/cli-docs-tool/LICENSE
|
||
create mode 100644 vendor/github.com/docker/cli-docs-tool/README.md
|
||
create mode 100644 vendor/github.com/docker/cli-docs-tool/annotation/annotation.go
|
||
create mode 100644 vendor/github.com/docker/cli-docs-tool/clidocstool.go
|
||
create mode 100644 vendor/github.com/docker/cli-docs-tool/clidocstool_md.go
|
||
create mode 100644 vendor/github.com/docker/cli-docs-tool/clidocstool_yaml.go
|
||
create mode 100644 vendor/github.com/docker/cli-docs-tool/docker-bake.hcl
|
||
create mode 100644 vendor/github.com/docker/cli-docs-tool/markdown.go
|
||
create mode 100644 vendor/github.com/russross/blackfriday/v2/.gitignore
|
||
create mode 100644 vendor/github.com/russross/blackfriday/v2/.travis.yml
|
||
create mode 100644 vendor/github.com/russross/blackfriday/v2/LICENSE.txt
|
||
create mode 100644 vendor/github.com/russross/blackfriday/v2/README.md
|
||
create mode 100644 vendor/github.com/russross/blackfriday/v2/block.go
|
||
create mode 100644 vendor/github.com/russross/blackfriday/v2/doc.go
|
||
create mode 100644 vendor/github.com/russross/blackfriday/v2/entities.go
|
||
create mode 100644 vendor/github.com/russross/blackfriday/v2/esc.go
|
||
create mode 100644 vendor/github.com/russross/blackfriday/v2/html.go
|
||
create mode 100644 vendor/github.com/russross/blackfriday/v2/inline.go
|
||
create mode 100644 vendor/github.com/russross/blackfriday/v2/markdown.go
|
||
create mode 100644 vendor/github.com/russross/blackfriday/v2/node.go
|
||
create mode 100644 vendor/github.com/russross/blackfriday/v2/smartypants.go
|
||
create mode 100644 vendor/github.com/spf13/cobra/doc/README.md
|
||
create mode 100644 vendor/github.com/spf13/cobra/doc/man_docs.go
|
||
create mode 100644 vendor/github.com/spf13/cobra/doc/man_docs.md
|
||
create mode 100644 vendor/github.com/spf13/cobra/doc/md_docs.go
|
||
create mode 100644 vendor/github.com/spf13/cobra/doc/md_docs.md
|
||
create mode 100644 vendor/github.com/spf13/cobra/doc/rest_docs.go
|
||
create mode 100644 vendor/github.com/spf13/cobra/doc/rest_docs.md
|
||
create mode 100644 vendor/github.com/spf13/cobra/doc/util.go
|
||
create mode 100644 vendor/github.com/spf13/cobra/doc/yaml_docs.go
|
||
create mode 100644 vendor/github.com/spf13/cobra/doc/yaml_docs.md
|
||
create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
|
||
create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
|
||
create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/README.md
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/apic.go
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/decode.go
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/encode.go
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go
|
||
create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go
|
||
|
||
diff --git a/docs/generate/go.mod b/docs/generate/go.mod
|
||
deleted file mode 100644
|
||
index d62ff455713a..000000000000
|
||
--- a/docs/generate/go.mod
|
||
+++ /dev/null
|
||
@@ -1,13 +0,0 @@
|
||
-module github.com/docker/cli/docs/generate
|
||
-
|
||
-// dummy go.mod to avoid dealing with dependencies specific
|
||
-// to docs generation and not really part of the project.
|
||
-
|
||
-go 1.16
|
||
-
|
||
-//require (
|
||
-// github.com/docker/cli v0.0.0+incompatible
|
||
-// github.com/docker/cli-docs-tool v0.5.0
|
||
-//)
|
||
-//
|
||
-//replace github.com/docker/cli v0.0.0+incompatible => ../../
|
||
diff --git a/docs/generate/tools.go b/docs/generate/tools.go
|
||
deleted file mode 100644
|
||
index 47510bc49a89..000000000000
|
||
--- a/docs/generate/tools.go
|
||
+++ /dev/null
|
||
@@ -1,8 +0,0 @@
|
||
-//go:build tools
|
||
-// +build tools
|
||
-
|
||
-package main
|
||
-
|
||
-import (
|
||
- _ "github.com/docker/cli-docs-tool"
|
||
-)
|
||
diff --git a/import.go b/import.go
|
||
new file mode 100644
|
||
index 000000000000..662a6055146c
|
||
--- /dev/null
|
||
+++ b/import.go
|
||
@@ -0,0 +1,17 @@
|
||
+// This is only used to define imports we need for doc generation.
|
||
+
|
||
+//go:build never
|
||
+// +build never
|
||
+
|
||
+package cli
|
||
+
|
||
+import (
|
||
+ // Used for md and yaml doc generation.
|
||
+ _ "github.com/docker/cli-docs-tool"
|
||
+
|
||
+ // Used for man page generation.
|
||
+ _ "github.com/cpuguy83/go-md2man/v2"
|
||
+ _ "github.com/spf13/cobra"
|
||
+ _ "github.com/spf13/cobra/doc"
|
||
+ _ "github.com/spf13/pflag"
|
||
+)
|
||
diff --git a/man/tools.go b/man/tools.go
|
||
deleted file mode 100644
|
||
index 3cafe6533aff..000000000000
|
||
--- a/man/tools.go
|
||
+++ /dev/null
|
||
@@ -1,11 +0,0 @@
|
||
-//go:build tools
|
||
-// +build tools
|
||
-
|
||
-package main
|
||
-
|
||
-import (
|
||
- _ "github.com/cpuguy83/go-md2man/v2"
|
||
- _ "github.com/spf13/cobra"
|
||
- _ "github.com/spf13/cobra/doc"
|
||
- _ "github.com/spf13/pflag"
|
||
-)
|
||
diff --git a/scripts/docs/generate-man.sh b/scripts/docs/generate-man.sh
|
||
index 12a4b81199db..1e12a95e9c9a 100755
|
||
--- a/scripts/docs/generate-man.sh
|
||
+++ b/scripts/docs/generate-man.sh
|
||
@@ -1,35 +1,22 @@
|
||
#!/usr/bin/env bash
|
||
|
||
-set -eu
|
||
-
|
||
-: "${MD2MAN_VERSION=v2.0.3}"
|
||
+set -Eeuo pipefail
|
||
|
||
export GO111MODULE=auto
|
||
|
||
-function clean {
|
||
- rm -rf "$buildir"
|
||
+# temporary "go.mod" to make -modfile= work
|
||
+touch go.mod
|
||
+
|
||
+function clean() {
|
||
+ rm -f "$(pwd)/go.mod"
|
||
}
|
||
|
||
-buildir=$(mktemp -d -t docker-cli-docsgen.XXXXXXXXXX)
|
||
trap clean EXIT
|
||
|
||
-(
|
||
- set -x
|
||
- cp -r . "$buildir/"
|
||
- cd "$buildir"
|
||
- # init dummy go.mod
|
||
- ./scripts/vendor init
|
||
- # install go-md2man and copy man/tools.go in root folder
|
||
- # to be able to fetch the required dependencies
|
||
- go mod edit -modfile=vendor.mod -require=github.com/cpuguy83/go-md2man/v2@${MD2MAN_VERSION}
|
||
- cp man/tools.go .
|
||
- # update vendor
|
||
- ./scripts/vendor update
|
||
- # build gen-manpages
|
||
- go build -mod=vendor -modfile=vendor.mod -tags manpages -o /tmp/gen-manpages ./man/generate.go
|
||
- # build go-md2man
|
||
- go build -mod=vendor -modfile=vendor.mod -o /tmp/go-md2man ./vendor/github.com/cpuguy83/go-md2man/v2
|
||
-)
|
||
+# build gen-manpages
|
||
+go build -mod=vendor -modfile=vendor.mod -tags manpages -o /tmp/gen-manpages ./man/generate.go
|
||
+# build go-md2man
|
||
+go build -mod=vendor -modfile=vendor.mod -o /tmp/go-md2man ./vendor/github.com/cpuguy83/go-md2man/v2
|
||
|
||
mkdir -p man/man1
|
||
(set -x ; /tmp/gen-manpages --root "." --target "$(pwd)/man/man1")
|
||
diff --git a/scripts/docs/generate-md.sh b/scripts/docs/generate-md.sh
|
||
index 7b49c39341ec..0af86843bbe4 100755
|
||
--- a/scripts/docs/generate-md.sh
|
||
+++ b/scripts/docs/generate-md.sh
|
||
@@ -1,36 +1,23 @@
|
||
#!/usr/bin/env bash
|
||
|
||
-set -eu
|
||
-
|
||
-: "${CLI_DOCS_TOOL_VERSION=v0.6.0}"
|
||
+set -Eeuo pipefail
|
||
|
||
export GO111MODULE=auto
|
||
|
||
+# temporary "go.mod" to make -modfile= work
|
||
+touch go.mod
|
||
+
|
||
function clean {
|
||
- rm -rf "$buildir"
|
||
+ rm -f "$(pwd)/go.mod"
|
||
if [ -f "$(pwd)/docs/reference/commandline/docker.md" ]; then
|
||
mv "$(pwd)/docs/reference/commandline/docker.md" "$(pwd)/docs/reference/commandline/cli.md"
|
||
fi
|
||
}
|
||
|
||
-buildir=$(mktemp -d -t docker-cli-docsgen.XXXXXXXXXX)
|
||
trap clean EXIT
|
||
|
||
-(
|
||
- set -x
|
||
- cp -r . "$buildir/"
|
||
- cd "$buildir"
|
||
- # init dummy go.mod
|
||
- ./scripts/vendor init
|
||
- # install cli-docs-tool and copy docs/tools.go in root folder
|
||
- # to be able to fetch the required depedencies
|
||
- go mod edit -modfile=vendor.mod -require=github.com/docker/cli-docs-tool@${CLI_DOCS_TOOL_VERSION}
|
||
- cp docs/generate/tools.go .
|
||
- # update vendor
|
||
- ./scripts/vendor update
|
||
- # build docsgen
|
||
- go build -mod=vendor -modfile=vendor.mod -tags docsgen -o /tmp/docsgen ./docs/generate/generate.go
|
||
-)
|
||
+# build docsgen
|
||
+go build -mod=vendor -modfile=vendor.mod -tags docsgen -o /tmp/docsgen ./docs/generate/generate.go
|
||
|
||
# yaml generation on docs repo needs the cli.md file: https://github.com/docker/cli/pull/3924#discussion_r1059986605
|
||
# but markdown generation docker.md atm. While waiting for a fix in cli-docs-tool
|
||
diff --git a/scripts/docs/generate-yaml.sh b/scripts/docs/generate-yaml.sh
|
||
index 4d0006e43e79..7d98e161df5d 100755
|
||
--- a/scripts/docs/generate-yaml.sh
|
||
+++ b/scripts/docs/generate-yaml.sh
|
||
@@ -1,33 +1,20 @@
|
||
#!/usr/bin/env bash
|
||
|
||
-set -eu
|
||
-
|
||
-: "${CLI_DOCS_TOOL_VERSION=v0.5.1}"
|
||
+set -Eeuo pipefail
|
||
|
||
export GO111MODULE=auto
|
||
|
||
-function clean {
|
||
- rm -rf "$buildir"
|
||
+# temporary "go.mod" to make -modfile= work
|
||
+touch go.mod
|
||
+
|
||
+function clean() {
|
||
+ rm -f "$(pwd)/go.mod"
|
||
}
|
||
|
||
-buildir=$(mktemp -d -t docker-cli-docsgen.XXXXXXXXXX)
|
||
trap clean EXIT
|
||
|
||
-(
|
||
- set -x
|
||
- cp -r . "$buildir/"
|
||
- cd "$buildir"
|
||
- # init dummy go.mod
|
||
- ./scripts/vendor init
|
||
- # install cli-docs-tool and copy docs/tools.go in root folder
|
||
- # to be able to fetch the required depedencies
|
||
- go mod edit -modfile=vendor.mod -require=github.com/docker/cli-docs-tool@${CLI_DOCS_TOOL_VERSION}
|
||
- cp docs/generate/tools.go .
|
||
- # update vendor
|
||
- ./scripts/vendor update
|
||
- # build docsgen
|
||
- go build -mod=vendor -modfile=vendor.mod -tags docsgen -o /tmp/docsgen ./docs/generate/generate.go
|
||
-)
|
||
+# build docsgen
|
||
+go build -mod=vendor -modfile=vendor.mod -tags docsgen -o /tmp/docsgen ./docs/generate/generate.go
|
||
|
||
mkdir -p docs/yaml
|
||
set -x
|
||
diff --git a/vendor.mod b/vendor.mod
|
||
index faa76e36787e..0c724da90ad8 100644
|
||
--- a/vendor.mod
|
||
+++ b/vendor.mod
|
||
@@ -9,6 +9,7 @@ go 1.19
|
||
require (
|
||
github.com/containerd/containerd v1.6.21
|
||
github.com/creack/pty v1.1.18
|
||
+ github.com/docker/cli-docs-tool v0.6.0
|
||
github.com/docker/distribution v2.8.2+incompatible
|
||
github.com/docker/docker v24.0.6+incompatible
|
||
github.com/docker/docker-credential-helpers v0.7.0
|
||
@@ -45,11 +46,13 @@ require (
|
||
gotest.tools/v3 v3.5.0
|
||
)
|
||
|
||
+require github.com/cpuguy83/go-md2man/v2 v2.0.3
|
||
+
|
||
require (
|
||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||
github.com/beorn7/perks v1.0.1 // indirect
|
||
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
||
github.com/docker/go-metrics v0.0.1 // indirect
|
||
@@ -67,13 +70,15 @@ require (
|
||
github.com/prometheus/common v0.37.0 // indirect
|
||
github.com/prometheus/procfs v0.8.0 // indirect
|
||
github.com/rivo/uniseg v0.2.0 // indirect
|
||
+ github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||
go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
|
||
golang.org/x/crypto v0.14.0 // indirect
|
||
golang.org/x/net v0.17.0 // indirect
|
||
golang.org/x/time v0.3.0 // indirect
|
||
- google.golang.org/genproto v0.0.0-20220706185917-7780775163c4 // indirect
|
||
- google.golang.org/grpc v1.50.1 // indirect
|
||
- google.golang.org/protobuf v1.28.1 // indirect
|
||
+ google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
|
||
+ google.golang.org/grpc v1.53.0 // indirect
|
||
+ google.golang.org/protobuf v1.30.0 // indirect
|
||
+ gopkg.in/yaml.v3 v3.0.1 // indirect
|
||
)
|
||
diff --git a/vendor.sum b/vendor.sum
|
||
index 0c5e44d643ce..1102efc6b68d 100644
|
||
--- a/vendor.sum
|
||
+++ b/vendor.sum
|
||
@@ -47,7 +47,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
|
||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||
@@ -65,8 +64,9 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
|
||
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||
@@ -74,11 +74,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
|
||
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
|
||
github.com/cloudflare/cfssl v0.0.0-20180323000720-5d63dbd981b5 h1:PqZ3bA4yzwywivzk7PBQWngJp2/PAS0bWRZerKteicY=
|
||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
|
||
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||
-github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
|
||
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
|
||
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
|
||
@@ -86,6 +81,8 @@ github.com/containerd/containerd v1.6.21 h1:eSTAmnvDKRPWan+MpSSfNyrtleXd86ogK9X8
|
||
github.com/containerd/containerd v1.6.21/go.mod h1:apei1/i5Ux2FzrK6+DM/suEsGuK/MeVOfy8tR2q7Wnw=
|
||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||
+github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||
+github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||
@@ -93,6 +90,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
|
||
+github.com/docker/cli-docs-tool v0.6.0 h1:Z9x10SaZgFaB6jHgz3OWooynhSa40CsWkpe5hEnG/qA=
|
||
+github.com/docker/cli-docs-tool v0.6.0/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbIwTusYpzCXS/o=
|
||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
|
||
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||
@@ -117,15 +116,12 @@ github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7Bv
|
||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||
-github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
|
||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
|
||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||
github.com/fvbommel/sortorder v1.0.2 h1:mV4o8B2hKboCdkJm+a7uX/SIpZob4JzUpc5GGnM45eo=
|
||
github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
|
||
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
|
||
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||
@@ -190,7 +186,6 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||
@@ -206,13 +201,11 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf
|
||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||
github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
|
||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
|
||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||
@@ -348,8 +341,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5
|
||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||
@@ -403,7 +396,6 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||
@@ -478,7 +470,6 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
|
||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||
@@ -540,12 +531,9 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
|
||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||
@@ -565,7 +553,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
|
||
@@ -667,15 +654,14 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG
|
||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||
-google.golang.org/genproto v0.0.0-20220706185917-7780775163c4 h1:7YDGQC/0sigNGzsEWyb9s72jTxlFdwVEYNJHbfQ+Dtg=
|
||
-google.golang.org/genproto v0.0.0-20220706185917-7780775163c4/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||
+google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA=
|
||
+google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
|
||
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||
@@ -689,11 +675,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
|
||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||
-google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||
-google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY=
|
||
-google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
|
||
+google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
|
||
+google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
|
||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||
@@ -706,10 +689,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
|
||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
|
||
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||
gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII=
|
||
@@ -727,7 +708,6 @@ gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllE
|
||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
|
||
index 792b4a60b346..8bf0e5b78153 100644
|
||
--- a/vendor/github.com/cespare/xxhash/v2/README.md
|
||
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
|
||
@@ -3,8 +3,7 @@
|
||
[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
|
||
[](https://github.com/cespare/xxhash/actions/workflows/test.yml)
|
||
|
||
-xxhash is a Go implementation of the 64-bit
|
||
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
||
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
|
||
high-quality hashing algorithm that is much faster than anything in the Go
|
||
standard library.
|
||
|
||
@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
|
||
func (*Digest) Sum64() uint64
|
||
```
|
||
|
||
-This implementation provides a fast pure-Go implementation and an even faster
|
||
-assembly implementation for amd64.
|
||
+The package is written with optimized pure Go and also contains even faster
|
||
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
|
||
+opts into using the Go code even on those architectures.
|
||
+
|
||
+[xxHash]: http://cyan4973.github.io/xxHash/
|
||
|
||
## Compatibility
|
||
|
||
@@ -45,19 +47,20 @@ I recommend using the latest release of Go.
|
||
Here are some quick benchmarks comparing the pure-Go and assembly
|
||
implementations of Sum64.
|
||
|
||
-| input size | purego | asm |
|
||
-| --- | --- | --- |
|
||
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
|
||
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
|
||
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
|
||
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
|
||
+| input size | purego | asm |
|
||
+| ---------- | --------- | --------- |
|
||
+| 4 B | 1.3 GB/s | 1.2 GB/s |
|
||
+| 16 B | 2.9 GB/s | 3.5 GB/s |
|
||
+| 100 B | 6.9 GB/s | 8.1 GB/s |
|
||
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
|
||
+| 10 MB | 12.0 GB/s | 17.3 GB/s |
|
||
|
||
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
|
||
-the following commands under Go 1.11.2:
|
||
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
|
||
+CPU using the following commands under Go 1.19.2:
|
||
|
||
```
|
||
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
|
||
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
|
||
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
|
||
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
|
||
```
|
||
|
||
## Projects using this package
|
||
diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh
|
||
new file mode 100644
|
||
index 000000000000..94b9c443987c
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/cespare/xxhash/v2/testall.sh
|
||
@@ -0,0 +1,10 @@
|
||
+#!/bin/bash
|
||
+set -eu -o pipefail
|
||
+
|
||
+# Small convenience script for running the tests with various combinations of
|
||
+# arch/tags. This assumes we're running on amd64 and have qemu available.
|
||
+
|
||
+go test ./...
|
||
+go test -tags purego ./...
|
||
+GOARCH=arm64 go test
|
||
+GOARCH=arm64 go test -tags purego
|
||
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
|
||
index 15c835d5417c..a9e0d45c9dcc 100644
|
||
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
|
||
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
|
||
@@ -16,19 +16,11 @@ const (
|
||
prime5 uint64 = 2870177450012600261
|
||
)
|
||
|
||
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
||
-// possible in the Go code is worth a small (but measurable) performance boost
|
||
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
||
-// convenience in the Go code in a few places where we need to intentionally
|
||
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
||
-// result overflows a uint64).
|
||
-var (
|
||
- prime1v = prime1
|
||
- prime2v = prime2
|
||
- prime3v = prime3
|
||
- prime4v = prime4
|
||
- prime5v = prime5
|
||
-)
|
||
+// Store the primes in an array as well.
|
||
+//
|
||
+// The consts are used when possible in Go code to avoid MOVs but we need a
|
||
+// contiguous array of the assembly code.
|
||
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
|
||
|
||
// Digest implements hash.Hash64.
|
||
type Digest struct {
|
||
@@ -50,10 +42,10 @@ func New() *Digest {
|
||
|
||
// Reset clears the Digest's state so that it can be reused.
|
||
func (d *Digest) Reset() {
|
||
- d.v1 = prime1v + prime2
|
||
+ d.v1 = primes[0] + prime2
|
||
d.v2 = prime2
|
||
d.v3 = 0
|
||
- d.v4 = -prime1v
|
||
+ d.v4 = -primes[0]
|
||
d.total = 0
|
||
d.n = 0
|
||
}
|
||
@@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
|
||
n = len(b)
|
||
d.total += uint64(n)
|
||
|
||
+ memleft := d.mem[d.n&(len(d.mem)-1):]
|
||
+
|
||
if d.n+n < 32 {
|
||
// This new data doesn't even fill the current block.
|
||
- copy(d.mem[d.n:], b)
|
||
+ copy(memleft, b)
|
||
d.n += n
|
||
return
|
||
}
|
||
|
||
if d.n > 0 {
|
||
// Finish off the partial block.
|
||
- copy(d.mem[d.n:], b)
|
||
+ c := copy(memleft, b)
|
||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||
- b = b[32-d.n:]
|
||
+ b = b[c:]
|
||
d.n = 0
|
||
}
|
||
|
||
@@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {
|
||
|
||
h += d.total
|
||
|
||
- i, end := 0, d.n
|
||
- for ; i+8 <= end; i += 8 {
|
||
- k1 := round(0, u64(d.mem[i:i+8]))
|
||
+ b := d.mem[:d.n&(len(d.mem)-1)]
|
||
+ for ; len(b) >= 8; b = b[8:] {
|
||
+ k1 := round(0, u64(b[:8]))
|
||
h ^= k1
|
||
h = rol27(h)*prime1 + prime4
|
||
}
|
||
- if i+4 <= end {
|
||
- h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
||
+ if len(b) >= 4 {
|
||
+ h ^= uint64(u32(b[:4])) * prime1
|
||
h = rol23(h)*prime2 + prime3
|
||
- i += 4
|
||
+ b = b[4:]
|
||
}
|
||
- for i < end {
|
||
- h ^= uint64(d.mem[i]) * prime5
|
||
+ for ; len(b) > 0; b = b[1:] {
|
||
+ h ^= uint64(b[0]) * prime5
|
||
h = rol11(h) * prime1
|
||
- i++
|
||
}
|
||
|
||
h ^= h >> 33
|
||
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
|
||
index be8db5bf7960..3e8b132579ec 100644
|
||
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
|
||
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
|
||
@@ -1,215 +1,209 @@
|
||
+//go:build !appengine && gc && !purego
|
||
// +build !appengine
|
||
// +build gc
|
||
// +build !purego
|
||
|
||
#include "textflag.h"
|
||
|
||
-// Register allocation:
|
||
-// AX h
|
||
-// SI pointer to advance through b
|
||
-// DX n
|
||
-// BX loop end
|
||
-// R8 v1, k1
|
||
-// R9 v2
|
||
-// R10 v3
|
||
-// R11 v4
|
||
-// R12 tmp
|
||
-// R13 prime1v
|
||
-// R14 prime2v
|
||
-// DI prime4v
|
||
-
|
||
-// round reads from and advances the buffer pointer in SI.
|
||
-// It assumes that R13 has prime1v and R14 has prime2v.
|
||
-#define round(r) \
|
||
- MOVQ (SI), R12 \
|
||
- ADDQ $8, SI \
|
||
- IMULQ R14, R12 \
|
||
- ADDQ R12, r \
|
||
- ROLQ $31, r \
|
||
- IMULQ R13, r
|
||
-
|
||
-// mergeRound applies a merge round on the two registers acc and val.
|
||
-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
||
-#define mergeRound(acc, val) \
|
||
- IMULQ R14, val \
|
||
- ROLQ $31, val \
|
||
- IMULQ R13, val \
|
||
- XORQ val, acc \
|
||
- IMULQ R13, acc \
|
||
- ADDQ DI, acc
|
||
+// Registers:
|
||
+#define h AX
|
||
+#define d AX
|
||
+#define p SI // pointer to advance through b
|
||
+#define n DX
|
||
+#define end BX // loop end
|
||
+#define v1 R8
|
||
+#define v2 R9
|
||
+#define v3 R10
|
||
+#define v4 R11
|
||
+#define x R12
|
||
+#define prime1 R13
|
||
+#define prime2 R14
|
||
+#define prime4 DI
|
||
+
|
||
+#define round(acc, x) \
|
||
+ IMULQ prime2, x \
|
||
+ ADDQ x, acc \
|
||
+ ROLQ $31, acc \
|
||
+ IMULQ prime1, acc
|
||
+
|
||
+// round0 performs the operation x = round(0, x).
|
||
+#define round0(x) \
|
||
+ IMULQ prime2, x \
|
||
+ ROLQ $31, x \
|
||
+ IMULQ prime1, x
|
||
+
|
||
+// mergeRound applies a merge round on the two registers acc and x.
|
||
+// It assumes that prime1, prime2, and prime4 have been loaded.
|
||
+#define mergeRound(acc, x) \
|
||
+ round0(x) \
|
||
+ XORQ x, acc \
|
||
+ IMULQ prime1, acc \
|
||
+ ADDQ prime4, acc
|
||
+
|
||
+// blockLoop processes as many 32-byte blocks as possible,
|
||
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
|
||
+// to process.
|
||
+#define blockLoop() \
|
||
+loop: \
|
||
+ MOVQ +0(p), x \
|
||
+ round(v1, x) \
|
||
+ MOVQ +8(p), x \
|
||
+ round(v2, x) \
|
||
+ MOVQ +16(p), x \
|
||
+ round(v3, x) \
|
||
+ MOVQ +24(p), x \
|
||
+ round(v4, x) \
|
||
+ ADDQ $32, p \
|
||
+ CMPQ p, end \
|
||
+ JLE loop
|
||
|
||
// func Sum64(b []byte) uint64
|
||
-TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||
// Load fixed primes.
|
||
- MOVQ ·prime1v(SB), R13
|
||
- MOVQ ·prime2v(SB), R14
|
||
- MOVQ ·prime4v(SB), DI
|
||
+ MOVQ ·primes+0(SB), prime1
|
||
+ MOVQ ·primes+8(SB), prime2
|
||
+ MOVQ ·primes+24(SB), prime4
|
||
|
||
// Load slice.
|
||
- MOVQ b_base+0(FP), SI
|
||
- MOVQ b_len+8(FP), DX
|
||
- LEAQ (SI)(DX*1), BX
|
||
+ MOVQ b_base+0(FP), p
|
||
+ MOVQ b_len+8(FP), n
|
||
+ LEAQ (p)(n*1), end
|
||
|
||
// The first loop limit will be len(b)-32.
|
||
- SUBQ $32, BX
|
||
+ SUBQ $32, end
|
||
|
||
// Check whether we have at least one block.
|
||
- CMPQ DX, $32
|
||
+ CMPQ n, $32
|
||
JLT noBlocks
|
||
|
||
// Set up initial state (v1, v2, v3, v4).
|
||
- MOVQ R13, R8
|
||
- ADDQ R14, R8
|
||
- MOVQ R14, R9
|
||
- XORQ R10, R10
|
||
- XORQ R11, R11
|
||
- SUBQ R13, R11
|
||
-
|
||
- // Loop until SI > BX.
|
||
-blockLoop:
|
||
- round(R8)
|
||
- round(R9)
|
||
- round(R10)
|
||
- round(R11)
|
||
-
|
||
- CMPQ SI, BX
|
||
- JLE blockLoop
|
||
-
|
||
- MOVQ R8, AX
|
||
- ROLQ $1, AX
|
||
- MOVQ R9, R12
|
||
- ROLQ $7, R12
|
||
- ADDQ R12, AX
|
||
- MOVQ R10, R12
|
||
- ROLQ $12, R12
|
||
- ADDQ R12, AX
|
||
- MOVQ R11, R12
|
||
- ROLQ $18, R12
|
||
- ADDQ R12, AX
|
||
-
|
||
- mergeRound(AX, R8)
|
||
- mergeRound(AX, R9)
|
||
- mergeRound(AX, R10)
|
||
- mergeRound(AX, R11)
|
||
+ MOVQ prime1, v1
|
||
+ ADDQ prime2, v1
|
||
+ MOVQ prime2, v2
|
||
+ XORQ v3, v3
|
||
+ XORQ v4, v4
|
||
+ SUBQ prime1, v4
|
||
+
|
||
+ blockLoop()
|
||
+
|
||
+ MOVQ v1, h
|
||
+ ROLQ $1, h
|
||
+ MOVQ v2, x
|
||
+ ROLQ $7, x
|
||
+ ADDQ x, h
|
||
+ MOVQ v3, x
|
||
+ ROLQ $12, x
|
||
+ ADDQ x, h
|
||
+ MOVQ v4, x
|
||
+ ROLQ $18, x
|
||
+ ADDQ x, h
|
||
+
|
||
+ mergeRound(h, v1)
|
||
+ mergeRound(h, v2)
|
||
+ mergeRound(h, v3)
|
||
+ mergeRound(h, v4)
|
||
|
||
JMP afterBlocks
|
||
|
||
noBlocks:
|
||
- MOVQ ·prime5v(SB), AX
|
||
+ MOVQ ·primes+32(SB), h
|
||
|
||
afterBlocks:
|
||
- ADDQ DX, AX
|
||
-
|
||
- // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
||
- ADDQ $24, BX
|
||
-
|
||
- CMPQ SI, BX
|
||
- JG fourByte
|
||
-
|
||
-wordLoop:
|
||
- // Calculate k1.
|
||
- MOVQ (SI), R8
|
||
- ADDQ $8, SI
|
||
- IMULQ R14, R8
|
||
- ROLQ $31, R8
|
||
- IMULQ R13, R8
|
||
-
|
||
- XORQ R8, AX
|
||
- ROLQ $27, AX
|
||
- IMULQ R13, AX
|
||
- ADDQ DI, AX
|
||
-
|
||
- CMPQ SI, BX
|
||
- JLE wordLoop
|
||
-
|
||
-fourByte:
|
||
- ADDQ $4, BX
|
||
- CMPQ SI, BX
|
||
- JG singles
|
||
-
|
||
- MOVL (SI), R8
|
||
- ADDQ $4, SI
|
||
- IMULQ R13, R8
|
||
- XORQ R8, AX
|
||
-
|
||
- ROLQ $23, AX
|
||
- IMULQ R14, AX
|
||
- ADDQ ·prime3v(SB), AX
|
||
-
|
||
-singles:
|
||
- ADDQ $4, BX
|
||
- CMPQ SI, BX
|
||
+ ADDQ n, h
|
||
+
|
||
+ ADDQ $24, end
|
||
+ CMPQ p, end
|
||
+ JG try4
|
||
+
|
||
+loop8:
|
||
+ MOVQ (p), x
|
||
+ ADDQ $8, p
|
||
+ round0(x)
|
||
+ XORQ x, h
|
||
+ ROLQ $27, h
|
||
+ IMULQ prime1, h
|
||
+ ADDQ prime4, h
|
||
+
|
||
+ CMPQ p, end
|
||
+ JLE loop8
|
||
+
|
||
+try4:
|
||
+ ADDQ $4, end
|
||
+ CMPQ p, end
|
||
+ JG try1
|
||
+
|
||
+ MOVL (p), x
|
||
+ ADDQ $4, p
|
||
+ IMULQ prime1, x
|
||
+ XORQ x, h
|
||
+
|
||
+ ROLQ $23, h
|
||
+ IMULQ prime2, h
|
||
+ ADDQ ·primes+16(SB), h
|
||
+
|
||
+try1:
|
||
+ ADDQ $4, end
|
||
+ CMPQ p, end
|
||
JGE finalize
|
||
|
||
-singlesLoop:
|
||
- MOVBQZX (SI), R12
|
||
- ADDQ $1, SI
|
||
- IMULQ ·prime5v(SB), R12
|
||
- XORQ R12, AX
|
||
+loop1:
|
||
+ MOVBQZX (p), x
|
||
+ ADDQ $1, p
|
||
+ IMULQ ·primes+32(SB), x
|
||
+ XORQ x, h
|
||
+ ROLQ $11, h
|
||
+ IMULQ prime1, h
|
||
|
||
- ROLQ $11, AX
|
||
- IMULQ R13, AX
|
||
-
|
||
- CMPQ SI, BX
|
||
- JL singlesLoop
|
||
+ CMPQ p, end
|
||
+ JL loop1
|
||
|
||
finalize:
|
||
- MOVQ AX, R12
|
||
- SHRQ $33, R12
|
||
- XORQ R12, AX
|
||
- IMULQ R14, AX
|
||
- MOVQ AX, R12
|
||
- SHRQ $29, R12
|
||
- XORQ R12, AX
|
||
- IMULQ ·prime3v(SB), AX
|
||
- MOVQ AX, R12
|
||
- SHRQ $32, R12
|
||
- XORQ R12, AX
|
||
-
|
||
- MOVQ AX, ret+24(FP)
|
||
+ MOVQ h, x
|
||
+ SHRQ $33, x
|
||
+ XORQ x, h
|
||
+ IMULQ prime2, h
|
||
+ MOVQ h, x
|
||
+ SHRQ $29, x
|
||
+ XORQ x, h
|
||
+ IMULQ ·primes+16(SB), h
|
||
+ MOVQ h, x
|
||
+ SHRQ $32, x
|
||
+ XORQ x, h
|
||
+
|
||
+ MOVQ h, ret+24(FP)
|
||
RET
|
||
|
||
-// writeBlocks uses the same registers as above except that it uses AX to store
|
||
-// the d pointer.
|
||
-
|
||
// func writeBlocks(d *Digest, b []byte) int
|
||
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
||
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||
// Load fixed primes needed for round.
|
||
- MOVQ ·prime1v(SB), R13
|
||
- MOVQ ·prime2v(SB), R14
|
||
+ MOVQ ·primes+0(SB), prime1
|
||
+ MOVQ ·primes+8(SB), prime2
|
||
|
||
// Load slice.
|
||
- MOVQ b_base+8(FP), SI
|
||
- MOVQ b_len+16(FP), DX
|
||
- LEAQ (SI)(DX*1), BX
|
||
- SUBQ $32, BX
|
||
+ MOVQ b_base+8(FP), p
|
||
+ MOVQ b_len+16(FP), n
|
||
+ LEAQ (p)(n*1), end
|
||
+ SUBQ $32, end
|
||
|
||
// Load vN from d.
|
||
- MOVQ d+0(FP), AX
|
||
- MOVQ 0(AX), R8 // v1
|
||
- MOVQ 8(AX), R9 // v2
|
||
- MOVQ 16(AX), R10 // v3
|
||
- MOVQ 24(AX), R11 // v4
|
||
+ MOVQ s+0(FP), d
|
||
+ MOVQ 0(d), v1
|
||
+ MOVQ 8(d), v2
|
||
+ MOVQ 16(d), v3
|
||
+ MOVQ 24(d), v4
|
||
|
||
// We don't need to check the loop condition here; this function is
|
||
// always called with at least one block of data to process.
|
||
-blockLoop:
|
||
- round(R8)
|
||
- round(R9)
|
||
- round(R10)
|
||
- round(R11)
|
||
-
|
||
- CMPQ SI, BX
|
||
- JLE blockLoop
|
||
+ blockLoop()
|
||
|
||
// Copy vN back to d.
|
||
- MOVQ R8, 0(AX)
|
||
- MOVQ R9, 8(AX)
|
||
- MOVQ R10, 16(AX)
|
||
- MOVQ R11, 24(AX)
|
||
-
|
||
- // The number of bytes written is SI minus the old base pointer.
|
||
- SUBQ b_base+8(FP), SI
|
||
- MOVQ SI, ret+32(FP)
|
||
+ MOVQ v1, 0(d)
|
||
+ MOVQ v2, 8(d)
|
||
+ MOVQ v3, 16(d)
|
||
+ MOVQ v4, 24(d)
|
||
+
|
||
+ // The number of bytes written is p minus the old base pointer.
|
||
+ SUBQ b_base+8(FP), p
|
||
+ MOVQ p, ret+32(FP)
|
||
|
||
RET
|
||
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
|
||
new file mode 100644
|
||
index 000000000000..7e3145a22186
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
|
||
@@ -0,0 +1,183 @@
|
||
+//go:build !appengine && gc && !purego
|
||
+// +build !appengine
|
||
+// +build gc
|
||
+// +build !purego
|
||
+
|
||
+#include "textflag.h"
|
||
+
|
||
+// Registers:
|
||
+#define digest R1
|
||
+#define h R2 // return value
|
||
+#define p R3 // input pointer
|
||
+#define n R4 // input length
|
||
+#define nblocks R5 // n / 32
|
||
+#define prime1 R7
|
||
+#define prime2 R8
|
||
+#define prime3 R9
|
||
+#define prime4 R10
|
||
+#define prime5 R11
|
||
+#define v1 R12
|
||
+#define v2 R13
|
||
+#define v3 R14
|
||
+#define v4 R15
|
||
+#define x1 R20
|
||
+#define x2 R21
|
||
+#define x3 R22
|
||
+#define x4 R23
|
||
+
|
||
+#define round(acc, x) \
|
||
+ MADD prime2, acc, x, acc \
|
||
+ ROR $64-31, acc \
|
||
+ MUL prime1, acc
|
||
+
|
||
+// round0 performs the operation x = round(0, x).
|
||
+#define round0(x) \
|
||
+ MUL prime2, x \
|
||
+ ROR $64-31, x \
|
||
+ MUL prime1, x
|
||
+
|
||
+#define mergeRound(acc, x) \
|
||
+ round0(x) \
|
||
+ EOR x, acc \
|
||
+ MADD acc, prime4, prime1, acc
|
||
+
|
||
+// blockLoop processes as many 32-byte blocks as possible,
|
||
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
|
||
+#define blockLoop() \
|
||
+ LSR $5, n, nblocks \
|
||
+ PCALIGN $16 \
|
||
+ loop: \
|
||
+ LDP.P 16(p), (x1, x2) \
|
||
+ LDP.P 16(p), (x3, x4) \
|
||
+ round(v1, x1) \
|
||
+ round(v2, x2) \
|
||
+ round(v3, x3) \
|
||
+ round(v4, x4) \
|
||
+ SUB $1, nblocks \
|
||
+ CBNZ nblocks, loop
|
||
+
|
||
+// func Sum64(b []byte) uint64
|
||
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||
+ LDP b_base+0(FP), (p, n)
|
||
+
|
||
+ LDP ·primes+0(SB), (prime1, prime2)
|
||
+ LDP ·primes+16(SB), (prime3, prime4)
|
||
+ MOVD ·primes+32(SB), prime5
|
||
+
|
||
+ CMP $32, n
|
||
+ CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
|
||
+ BLT afterLoop
|
||
+
|
||
+ ADD prime1, prime2, v1
|
||
+ MOVD prime2, v2
|
||
+ MOVD $0, v3
|
||
+ NEG prime1, v4
|
||
+
|
||
+ blockLoop()
|
||
+
|
||
+ ROR $64-1, v1, x1
|
||
+ ROR $64-7, v2, x2
|
||
+ ADD x1, x2
|
||
+ ROR $64-12, v3, x3
|
||
+ ROR $64-18, v4, x4
|
||
+ ADD x3, x4
|
||
+ ADD x2, x4, h
|
||
+
|
||
+ mergeRound(h, v1)
|
||
+ mergeRound(h, v2)
|
||
+ mergeRound(h, v3)
|
||
+ mergeRound(h, v4)
|
||
+
|
||
+afterLoop:
|
||
+ ADD n, h
|
||
+
|
||
+ TBZ $4, n, try8
|
||
+ LDP.P 16(p), (x1, x2)
|
||
+
|
||
+ round0(x1)
|
||
+
|
||
+ // NOTE: here and below, sequencing the EOR after the ROR (using a
|
||
+ // rotated register) is worth a small but measurable speedup for small
|
||
+ // inputs.
|
||
+ ROR $64-27, h
|
||
+ EOR x1 @> 64-27, h, h
|
||
+ MADD h, prime4, prime1, h
|
||
+
|
||
+ round0(x2)
|
||
+ ROR $64-27, h
|
||
+ EOR x2 @> 64-27, h, h
|
||
+ MADD h, prime4, prime1, h
|
||
+
|
||
+try8:
|
||
+ TBZ $3, n, try4
|
||
+ MOVD.P 8(p), x1
|
||
+
|
||
+ round0(x1)
|
||
+ ROR $64-27, h
|
||
+ EOR x1 @> 64-27, h, h
|
||
+ MADD h, prime4, prime1, h
|
||
+
|
||
+try4:
|
||
+ TBZ $2, n, try2
|
||
+ MOVWU.P 4(p), x2
|
||
+
|
||
+ MUL prime1, x2
|
||
+ ROR $64-23, h
|
||
+ EOR x2 @> 64-23, h, h
|
||
+ MADD h, prime3, prime2, h
|
||
+
|
||
+try2:
|
||
+ TBZ $1, n, try1
|
||
+ MOVHU.P 2(p), x3
|
||
+ AND $255, x3, x1
|
||
+ LSR $8, x3, x2
|
||
+
|
||
+ MUL prime5, x1
|
||
+ ROR $64-11, h
|
||
+ EOR x1 @> 64-11, h, h
|
||
+ MUL prime1, h
|
||
+
|
||
+ MUL prime5, x2
|
||
+ ROR $64-11, h
|
||
+ EOR x2 @> 64-11, h, h
|
||
+ MUL prime1, h
|
||
+
|
||
+try1:
|
||
+ TBZ $0, n, finalize
|
||
+ MOVBU (p), x4
|
||
+
|
||
+ MUL prime5, x4
|
||
+ ROR $64-11, h
|
||
+ EOR x4 @> 64-11, h, h
|
||
+ MUL prime1, h
|
||
+
|
||
+finalize:
|
||
+ EOR h >> 33, h
|
||
+ MUL prime2, h
|
||
+ EOR h >> 29, h
|
||
+ MUL prime3, h
|
||
+ EOR h >> 32, h
|
||
+
|
||
+ MOVD h, ret+24(FP)
|
||
+ RET
|
||
+
|
||
+// func writeBlocks(d *Digest, b []byte) int
|
||
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||
+ LDP ·primes+0(SB), (prime1, prime2)
|
||
+
|
||
+ // Load state. Assume v[1-4] are stored contiguously.
|
||
+ MOVD d+0(FP), digest
|
||
+ LDP 0(digest), (v1, v2)
|
||
+ LDP 16(digest), (v3, v4)
|
||
+
|
||
+ LDP b_base+8(FP), (p, n)
|
||
+
|
||
+ blockLoop()
|
||
+
|
||
+ // Store updated state.
|
||
+ STP (v1, v2), 0(digest)
|
||
+ STP (v3, v4), 16(digest)
|
||
+
|
||
+ BIC $31, n
|
||
+ MOVD n, ret+32(FP)
|
||
+ RET
|
||
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
|
||
similarity index 73%
|
||
rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
|
||
rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
|
||
index ad14b807f4d9..9216e0a40c1a 100644
|
||
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
|
||
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
|
||
@@ -1,3 +1,5 @@
|
||
+//go:build (amd64 || arm64) && !appengine && gc && !purego
|
||
+// +build amd64 arm64
|
||
// +build !appengine
|
||
// +build gc
|
||
// +build !purego
|
||
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
|
||
index 4a5a821603e5..26df13bba4b7 100644
|
||
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
|
||
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
|
||
@@ -1,4 +1,5 @@
|
||
-// +build !amd64 appengine !gc purego
|
||
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
|
||
+// +build !amd64,!arm64 appengine !gc purego
|
||
|
||
package xxhash
|
||
|
||
@@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
|
||
var h uint64
|
||
|
||
if n >= 32 {
|
||
- v1 := prime1v + prime2
|
||
+ v1 := primes[0] + prime2
|
||
v2 := prime2
|
||
v3 := uint64(0)
|
||
- v4 := -prime1v
|
||
+ v4 := -primes[0]
|
||
for len(b) >= 32 {
|
||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||
@@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {
|
||
|
||
h += uint64(n)
|
||
|
||
- i, end := 0, len(b)
|
||
- for ; i+8 <= end; i += 8 {
|
||
- k1 := round(0, u64(b[i:i+8:len(b)]))
|
||
+ for ; len(b) >= 8; b = b[8:] {
|
||
+ k1 := round(0, u64(b[:8]))
|
||
h ^= k1
|
||
h = rol27(h)*prime1 + prime4
|
||
}
|
||
- if i+4 <= end {
|
||
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
||
+ if len(b) >= 4 {
|
||
+ h ^= uint64(u32(b[:4])) * prime1
|
||
h = rol23(h)*prime2 + prime3
|
||
- i += 4
|
||
+ b = b[4:]
|
||
}
|
||
- for ; i < end; i++ {
|
||
- h ^= uint64(b[i]) * prime5
|
||
+ for ; len(b) > 0; b = b[1:] {
|
||
+ h ^= uint64(b[0]) * prime5
|
||
h = rol11(h) * prime1
|
||
}
|
||
|
||
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
|
||
index fc9bea7a31f2..e86f1b5fd8e4 100644
|
||
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
|
||
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
|
||
@@ -1,3 +1,4 @@
|
||
+//go:build appengine
|
||
// +build appengine
|
||
|
||
// This file contains the safe implementations of otherwise unsafe-using code.
|
||
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
|
||
index 376e0ca2e497..1c1638fd88a1 100644
|
||
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
|
||
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
|
||
@@ -1,3 +1,4 @@
|
||
+//go:build !appengine
|
||
// +build !appengine
|
||
|
||
// This file encapsulates usage of unsafe.
|
||
@@ -11,7 +12,7 @@ import (
|
||
|
||
// In the future it's possible that compiler optimizations will make these
|
||
// XxxString functions unnecessary by realizing that calls such as
|
||
-// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
|
||
+// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
|
||
// If that happens, even if we keep these functions they can be replaced with
|
||
// the trivial safe code.
|
||
|
||
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/.gitignore b/vendor/github.com/cpuguy83/go-md2man/v2/.gitignore
|
||
new file mode 100644
|
||
index 000000000000..30f97c3d73ab
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/.gitignore
|
||
@@ -0,0 +1,2 @@
|
||
+go-md2man
|
||
+bin
|
||
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/.golangci.yml b/vendor/github.com/cpuguy83/go-md2man/v2/.golangci.yml
|
||
new file mode 100644
|
||
index 000000000000..71f073f3c6b9
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/.golangci.yml
|
||
@@ -0,0 +1,6 @@
|
||
+# For documentation, see https://golangci-lint.run/usage/configuration/
|
||
+
|
||
+linters:
|
||
+ enable:
|
||
+ - gofumpt
|
||
+
|
||
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile b/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile
|
||
new file mode 100644
|
||
index 000000000000..7181c5306f41
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/Dockerfile
|
||
@@ -0,0 +1,20 @@
|
||
+ARG GO_VERSION=1.18
|
||
+ARG GO_IMAGE=golang:${GO_VERSION}
|
||
+
|
||
+FROM --platform=$BUILDPLATFORM $GO_IMAGE AS build
|
||
+COPY . /go/src/github.com/cpuguy83/go-md2man
|
||
+WORKDIR /go/src/github.com/cpuguy83/go-md2man
|
||
+ARG TARGETOS
|
||
+ARG TARGETARCH
|
||
+ARG TARGETVARIANT
|
||
+RUN \
|
||
+ export GOOS="${TARGETOS}"; \
|
||
+ export GOARCH="${TARGETARCH}"; \
|
||
+ if [ "${TARGETARCH}" = "arm" ] && [ "${TARGETVARIANT}" ]; then \
|
||
+ export GOARM="${TARGETVARIANT#v}"; \
|
||
+ fi; \
|
||
+ CGO_ENABLED=0 go build
|
||
+
|
||
+FROM scratch
|
||
+COPY --from=build /go/src/github.com/cpuguy83/go-md2man/go-md2man /go-md2man
|
||
+ENTRYPOINT ["/go-md2man"]
|
||
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
|
||
new file mode 100644
|
||
index 000000000000..1cade6cef6a1
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md
|
||
@@ -0,0 +1,21 @@
|
||
+The MIT License (MIT)
|
||
+
|
||
+Copyright (c) 2014 Brian Goff
|
||
+
|
||
+Permission is hereby granted, free of charge, to any person obtaining a copy
|
||
+of this software and associated documentation files (the "Software"), to deal
|
||
+in the Software without restriction, including without limitation the rights
|
||
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||
+copies of the Software, and to permit persons to whom the Software is
|
||
+furnished to do so, subject to the following conditions:
|
||
+
|
||
+The above copyright notice and this permission notice shall be included in all
|
||
+copies or substantial portions of the Software.
|
||
+
|
||
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||
+SOFTWARE.
|
||
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/Makefile b/vendor/github.com/cpuguy83/go-md2man/v2/Makefile
|
||
new file mode 100644
|
||
index 000000000000..437fc9997926
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/Makefile
|
||
@@ -0,0 +1,35 @@
|
||
+GO111MODULE ?= on
|
||
+LINTER_BIN ?= golangci-lint
|
||
+
|
||
+export GO111MODULE
|
||
+
|
||
+.PHONY:
|
||
+build: bin/go-md2man
|
||
+
|
||
+.PHONY: clean
|
||
+clean:
|
||
+ @rm -rf bin/*
|
||
+
|
||
+.PHONY: test
|
||
+test:
|
||
+ @go test $(TEST_FLAGS) ./...
|
||
+
|
||
+bin/go-md2man: actual_build_flags := $(BUILD_FLAGS) -o bin/go-md2man
|
||
+bin/go-md2man: bin
|
||
+ @CGO_ENABLED=0 go build $(actual_build_flags)
|
||
+
|
||
+bin:
|
||
+ @mkdir ./bin
|
||
+
|
||
+.PHONY: mod
|
||
+mod:
|
||
+ @go mod tidy
|
||
+
|
||
+.PHONY: check-mod
|
||
+check-mod: # verifies that module changes for go.mod and go.sum are checked in
|
||
+ @hack/ci/check_mods.sh
|
||
+
|
||
+.PHONY: vendor
|
||
+vendor: mod
|
||
+ @go mod vendor -v
|
||
+
|
||
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/README.md b/vendor/github.com/cpuguy83/go-md2man/v2/README.md
|
||
new file mode 100644
|
||
index 000000000000..0e30d341483c
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/README.md
|
||
@@ -0,0 +1,15 @@
|
||
+go-md2man
|
||
+=========
|
||
+
|
||
+Converts markdown into roff (man pages).
|
||
+
|
||
+Uses blackfriday to process markdown into man pages.
|
||
+
|
||
+### Usage
|
||
+
|
||
+./md2man -in /path/to/markdownfile.md -out /manfile/output/path
|
||
+
|
||
+### How to contribute
|
||
+
|
||
+We use go modules to manage dependencies.
|
||
+As such you must be using at lest go1.11.
|
||
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md b/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md
|
||
new file mode 100644
|
||
index 000000000000..aa4587e279ff
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/go-md2man.1.md
|
||
@@ -0,0 +1,28 @@
|
||
+go-md2man 1 "January 2015" go-md2man "User Manual"
|
||
+==================================================
|
||
+
|
||
+# NAME
|
||
+go-md2man - Convert markdown files into manpages
|
||
+
|
||
+# SYNOPSIS
|
||
+**go-md2man** [**-in**=*/path/to/md/file*] [**-out**=*/path/to/output*]
|
||
+
|
||
+# DESCRIPTION
|
||
+**go-md2man** converts standard markdown formatted documents into manpages. It is
|
||
+written purely in Go so as to reduce dependencies on 3rd party libs.
|
||
+
|
||
+By default, the input is stdin and the output is stdout.
|
||
+
|
||
+# EXAMPLES
|
||
+Convert the markdown file *go-md2man.1.md* into a manpage:
|
||
+```
|
||
+go-md2man < go-md2man.1.md > go-md2man.1
|
||
+```
|
||
+
|
||
+Same, but using command line arguments instead of shell redirection:
|
||
+```
|
||
+go-md2man -in=go-md2man.1.md -out=go-md2man.1
|
||
+```
|
||
+
|
||
+# HISTORY
|
||
+January 2015, Originally compiled by Brian Goff (cpuguy83@gmail.com).
|
||
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man.go
|
||
new file mode 100644
|
||
index 000000000000..4ff873b8e767
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man.go
|
||
@@ -0,0 +1,53 @@
|
||
+package main
|
||
+
|
||
+import (
|
||
+ "flag"
|
||
+ "fmt"
|
||
+ "io/ioutil"
|
||
+ "os"
|
||
+
|
||
+ "github.com/cpuguy83/go-md2man/v2/md2man"
|
||
+)
|
||
+
|
||
+var (
|
||
+ inFilePath = flag.String("in", "", "Path to file to be processed (default: stdin)")
|
||
+ outFilePath = flag.String("out", "", "Path to output processed file (default: stdout)")
|
||
+)
|
||
+
|
||
+func main() {
|
||
+ var err error
|
||
+ flag.Parse()
|
||
+
|
||
+ inFile := os.Stdin
|
||
+ if *inFilePath != "" {
|
||
+ inFile, err = os.Open(*inFilePath)
|
||
+ if err != nil {
|
||
+ fmt.Println(err)
|
||
+ os.Exit(1)
|
||
+ }
|
||
+ }
|
||
+ defer inFile.Close() // nolint: errcheck
|
||
+
|
||
+ doc, err := ioutil.ReadAll(inFile)
|
||
+ if err != nil {
|
||
+ fmt.Println(err)
|
||
+ os.Exit(1)
|
||
+ }
|
||
+
|
||
+ out := md2man.Render(doc)
|
||
+
|
||
+ outFile := os.Stdout
|
||
+ if *outFilePath != "" {
|
||
+ outFile, err = os.Create(*outFilePath)
|
||
+ if err != nil {
|
||
+ fmt.Println(err)
|
||
+ os.Exit(1)
|
||
+ }
|
||
+ defer outFile.Close() // nolint: errcheck
|
||
+ }
|
||
+ _, err = outFile.Write(out)
|
||
+ if err != nil {
|
||
+ fmt.Println(err)
|
||
+ os.Exit(1)
|
||
+ }
|
||
+}
|
||
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
|
||
new file mode 100644
|
||
index 000000000000..42bf32aab003
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
|
||
@@ -0,0 +1,16 @@
|
||
+package md2man
|
||
+
|
||
+import (
|
||
+ "github.com/russross/blackfriday/v2"
|
||
+)
|
||
+
|
||
+// Render converts a markdown document into a roff formatted document.
|
||
+func Render(doc []byte) []byte {
|
||
+ renderer := NewRoffRenderer()
|
||
+
|
||
+ return blackfriday.Run(doc,
|
||
+ []blackfriday.Option{
|
||
+ blackfriday.WithRenderer(renderer),
|
||
+ blackfriday.WithExtensions(renderer.GetExtensions()),
|
||
+ }...)
|
||
+}
|
||
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
|
||
new file mode 100644
|
||
index 000000000000..4b19188d90fd
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
|
||
@@ -0,0 +1,348 @@
|
||
+package md2man
|
||
+
|
||
+import (
|
||
+ "bytes"
|
||
+ "fmt"
|
||
+ "io"
|
||
+ "os"
|
||
+ "strings"
|
||
+
|
||
+ "github.com/russross/blackfriday/v2"
|
||
+)
|
||
+
|
||
+// roffRenderer implements the blackfriday.Renderer interface for creating
|
||
+// roff format (manpages) from markdown text
|
||
+type roffRenderer struct {
|
||
+ extensions blackfriday.Extensions
|
||
+ listCounters []int
|
||
+ firstHeader bool
|
||
+ firstDD bool
|
||
+ listDepth int
|
||
+}
|
||
+
|
||
+const (
|
||
+ titleHeader = ".TH "
|
||
+ topLevelHeader = "\n\n.SH "
|
||
+ secondLevelHdr = "\n.SH "
|
||
+ otherHeader = "\n.SS "
|
||
+ crTag = "\n"
|
||
+ emphTag = "\\fI"
|
||
+ emphCloseTag = "\\fP"
|
||
+ strongTag = "\\fB"
|
||
+ strongCloseTag = "\\fP"
|
||
+ breakTag = "\n.br\n"
|
||
+ paraTag = "\n.PP\n"
|
||
+ hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
|
||
+ linkTag = "\n\\[la]"
|
||
+ linkCloseTag = "\\[ra]"
|
||
+ codespanTag = "\\fB"
|
||
+ codespanCloseTag = "\\fR"
|
||
+ codeTag = "\n.EX\n"
|
||
+ codeCloseTag = "\n.EE\n"
|
||
+ quoteTag = "\n.PP\n.RS\n"
|
||
+ quoteCloseTag = "\n.RE\n"
|
||
+ listTag = "\n.RS\n"
|
||
+ listCloseTag = "\n.RE\n"
|
||
+ dtTag = "\n.TP\n"
|
||
+ dd2Tag = "\n"
|
||
+ tableStart = "\n.TS\nallbox;\n"
|
||
+ tableEnd = ".TE\n"
|
||
+ tableCellStart = "T{\n"
|
||
+ tableCellEnd = "\nT}\n"
|
||
+)
|
||
+
|
||
+// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
|
||
+// from markdown
|
||
+func NewRoffRenderer() *roffRenderer { // nolint: golint
|
||
+ var extensions blackfriday.Extensions
|
||
+
|
||
+ extensions |= blackfriday.NoIntraEmphasis
|
||
+ extensions |= blackfriday.Tables
|
||
+ extensions |= blackfriday.FencedCode
|
||
+ extensions |= blackfriday.SpaceHeadings
|
||
+ extensions |= blackfriday.Footnotes
|
||
+ extensions |= blackfriday.Titleblock
|
||
+ extensions |= blackfriday.DefinitionLists
|
||
+ return &roffRenderer{
|
||
+ extensions: extensions,
|
||
+ }
|
||
+}
|
||
+
|
||
+// GetExtensions returns the list of extensions used by this renderer implementation
|
||
+func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
|
||
+ return r.extensions
|
||
+}
|
||
+
|
||
+// RenderHeader handles outputting the header at document start
|
||
+func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
|
||
+ // disable hyphenation
|
||
+ out(w, ".nh\n")
|
||
+}
|
||
+
|
||
+// RenderFooter handles outputting the footer at the document end; the roff
|
||
+// renderer has no footer information
|
||
+func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
|
||
+}
|
||
+
|
||
+// RenderNode is called for each node in a markdown document; based on the node
|
||
+// type the equivalent roff output is sent to the writer
|
||
+func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
||
+ walkAction := blackfriday.GoToNext
|
||
+
|
||
+ switch node.Type {
|
||
+ case blackfriday.Text:
|
||
+ escapeSpecialChars(w, node.Literal)
|
||
+ case blackfriday.Softbreak:
|
||
+ out(w, crTag)
|
||
+ case blackfriday.Hardbreak:
|
||
+ out(w, breakTag)
|
||
+ case blackfriday.Emph:
|
||
+ if entering {
|
||
+ out(w, emphTag)
|
||
+ } else {
|
||
+ out(w, emphCloseTag)
|
||
+ }
|
||
+ case blackfriday.Strong:
|
||
+ if entering {
|
||
+ out(w, strongTag)
|
||
+ } else {
|
||
+ out(w, strongCloseTag)
|
||
+ }
|
||
+ case blackfriday.Link:
|
||
+ // Don't render the link text for automatic links, because this
|
||
+ // will only duplicate the URL in the roff output.
|
||
+ // See https://daringfireball.net/projects/markdown/syntax#autolink
|
||
+ if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) {
|
||
+ out(w, string(node.FirstChild.Literal))
|
||
+ }
|
||
+ // Hyphens in a link must be escaped to avoid word-wrap in the rendered man page.
|
||
+ escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-")
|
||
+ out(w, linkTag+escapedLink+linkCloseTag)
|
||
+ walkAction = blackfriday.SkipChildren
|
||
+ case blackfriday.Image:
|
||
+ // ignore images
|
||
+ walkAction = blackfriday.SkipChildren
|
||
+ case blackfriday.Code:
|
||
+ out(w, codespanTag)
|
||
+ escapeSpecialChars(w, node.Literal)
|
||
+ out(w, codespanCloseTag)
|
||
+ case blackfriday.Document:
|
||
+ break
|
||
+ case blackfriday.Paragraph:
|
||
+ // roff .PP markers break lists
|
||
+ if r.listDepth > 0 {
|
||
+ return blackfriday.GoToNext
|
||
+ }
|
||
+ if entering {
|
||
+ out(w, paraTag)
|
||
+ } else {
|
||
+ out(w, crTag)
|
||
+ }
|
||
+ case blackfriday.BlockQuote:
|
||
+ if entering {
|
||
+ out(w, quoteTag)
|
||
+ } else {
|
||
+ out(w, quoteCloseTag)
|
||
+ }
|
||
+ case blackfriday.Heading:
|
||
+ r.handleHeading(w, node, entering)
|
||
+ case blackfriday.HorizontalRule:
|
||
+ out(w, hruleTag)
|
||
+ case blackfriday.List:
|
||
+ r.handleList(w, node, entering)
|
||
+ case blackfriday.Item:
|
||
+ r.handleItem(w, node, entering)
|
||
+ case blackfriday.CodeBlock:
|
||
+ out(w, codeTag)
|
||
+ escapeSpecialChars(w, node.Literal)
|
||
+ out(w, codeCloseTag)
|
||
+ case blackfriday.Table:
|
||
+ r.handleTable(w, node, entering)
|
||
+ case blackfriday.TableHead:
|
||
+ case blackfriday.TableBody:
|
||
+ case blackfriday.TableRow:
|
||
+ // no action as cell entries do all the nroff formatting
|
||
+ return blackfriday.GoToNext
|
||
+ case blackfriday.TableCell:
|
||
+ r.handleTableCell(w, node, entering)
|
||
+ case blackfriday.HTMLSpan:
|
||
+ // ignore other HTML tags
|
||
+ case blackfriday.HTMLBlock:
|
||
+ if bytes.HasPrefix(node.Literal, []byte("<!--")) {
|
||
+ break // ignore comments, no warning
|
||
+ }
|
||
+ fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
|
||
+ default:
|
||
+ fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
|
||
+ }
|
||
+ return walkAction
|
||
+}
|
||
+
|
||
+func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
|
||
+ if entering {
|
||
+ switch node.Level {
|
||
+ case 1:
|
||
+ if !r.firstHeader {
|
||
+ out(w, titleHeader)
|
||
+ r.firstHeader = true
|
||
+ break
|
||
+ }
|
||
+ out(w, topLevelHeader)
|
||
+ case 2:
|
||
+ out(w, secondLevelHdr)
|
||
+ default:
|
||
+ out(w, otherHeader)
|
||
+ }
|
||
+ }
|
||
+}
|
||
+
|
||
+func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) {
|
||
+ openTag := listTag
|
||
+ closeTag := listCloseTag
|
||
+ if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
|
||
+ // tags for definition lists handled within Item node
|
||
+ openTag = ""
|
||
+ closeTag = ""
|
||
+ }
|
||
+ if entering {
|
||
+ r.listDepth++
|
||
+ if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||
+ r.listCounters = append(r.listCounters, 1)
|
||
+ }
|
||
+ out(w, openTag)
|
||
+ } else {
|
||
+ if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||
+ r.listCounters = r.listCounters[:len(r.listCounters)-1]
|
||
+ }
|
||
+ out(w, closeTag)
|
||
+ r.listDepth--
|
||
+ }
|
||
+}
|
||
+
|
||
+func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) {
|
||
+ if entering {
|
||
+ if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||
+ out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
|
||
+ r.listCounters[len(r.listCounters)-1]++
|
||
+ } else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
|
||
+ // DT (definition term): line just before DD (see below).
|
||
+ out(w, dtTag)
|
||
+ r.firstDD = true
|
||
+ } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
|
||
+ // DD (definition description): line that starts with ": ".
|
||
+ //
|
||
+ // We have to distinguish between the first DD and the
|
||
+ // subsequent ones, as there should be no vertical
|
||
+ // whitespace between the DT and the first DD.
|
||
+ if r.firstDD {
|
||
+ r.firstDD = false
|
||
+ } else {
|
||
+ out(w, dd2Tag)
|
||
+ }
|
||
+ } else {
|
||
+ out(w, ".IP \\(bu 2\n")
|
||
+ }
|
||
+ } else {
|
||
+ out(w, "\n")
|
||
+ }
|
||
+}
|
||
+
|
||
+func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
|
||
+ if entering {
|
||
+ out(w, tableStart)
|
||
+ // call walker to count cells (and rows?) so format section can be produced
|
||
+ columns := countColumns(node)
|
||
+ out(w, strings.Repeat("l ", columns)+"\n")
|
||
+ out(w, strings.Repeat("l ", columns)+".\n")
|
||
+ } else {
|
||
+ out(w, tableEnd)
|
||
+ }
|
||
+}
|
||
+
|
||
+func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
|
||
+ if entering {
|
||
+ var start string
|
||
+ if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
|
||
+ start = "\t"
|
||
+ }
|
||
+ if node.IsHeader {
|
||
+ start += strongTag
|
||
+ } else if nodeLiteralSize(node) > 30 {
|
||
+ start += tableCellStart
|
||
+ }
|
||
+ out(w, start)
|
||
+ } else {
|
||
+ var end string
|
||
+ if node.IsHeader {
|
||
+ end = strongCloseTag
|
||
+ } else if nodeLiteralSize(node) > 30 {
|
||
+ end = tableCellEnd
|
||
+ }
|
||
+ if node.Next == nil && end != tableCellEnd {
|
||
+ // Last cell: need to carriage return if we are at the end of the
|
||
+ // header row and content isn't wrapped in a "tablecell"
|
||
+ end += crTag
|
||
+ }
|
||
+ out(w, end)
|
||
+ }
|
||
+}
|
||
+
|
||
+func nodeLiteralSize(node *blackfriday.Node) int {
|
||
+ total := 0
|
||
+ for n := node.FirstChild; n != nil; n = n.FirstChild {
|
||
+ total += len(n.Literal)
|
||
+ }
|
||
+ return total
|
||
+}
|
||
+
|
||
+// because roff format requires knowing the column count before outputting any table
|
||
+// data we need to walk a table tree and count the columns
|
||
+func countColumns(node *blackfriday.Node) int {
|
||
+ var columns int
|
||
+
|
||
+ node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
||
+ switch node.Type {
|
||
+ case blackfriday.TableRow:
|
||
+ if !entering {
|
||
+ return blackfriday.Terminate
|
||
+ }
|
||
+ case blackfriday.TableCell:
|
||
+ if entering {
|
||
+ columns++
|
||
+ }
|
||
+ default:
|
||
+ }
|
||
+ return blackfriday.GoToNext
|
||
+ })
|
||
+ return columns
|
||
+}
|
||
+
|
||
+func out(w io.Writer, output string) {
|
||
+ io.WriteString(w, output) // nolint: errcheck
|
||
+}
|
||
+
|
||
+func escapeSpecialChars(w io.Writer, text []byte) {
|
||
+ for i := 0; i < len(text); i++ {
|
||
+ // escape initial apostrophe or period
|
||
+ if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
|
||
+ out(w, "\\&")
|
||
+ }
|
||
+
|
||
+ // directly copy normal characters
|
||
+ org := i
|
||
+
|
||
+ for i < len(text) && text[i] != '\\' {
|
||
+ i++
|
||
+ }
|
||
+ if i > org {
|
||
+ w.Write(text[org:i]) // nolint: errcheck
|
||
+ }
|
||
+
|
||
+ // escape a character
|
||
+ if i >= len(text) {
|
||
+ break
|
||
+ }
|
||
+
|
||
+ w.Write([]byte{'\\', text[i]}) // nolint: errcheck
|
||
+ }
|
||
+}
|
||
diff --git a/vendor/github.com/docker/cli-docs-tool/.dockerignore b/vendor/github.com/docker/cli-docs-tool/.dockerignore
|
||
new file mode 100644
|
||
index 000000000000..c8c323c89663
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/docker/cli-docs-tool/.dockerignore
|
||
@@ -0,0 +1,2 @@
|
||
+/coverage.txt
|
||
+/example/docs
|
||
diff --git a/vendor/github.com/docker/cli-docs-tool/.gitignore b/vendor/github.com/docker/cli-docs-tool/.gitignore
|
||
new file mode 100644
|
||
index 000000000000..c8c323c89663
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/docker/cli-docs-tool/.gitignore
|
||
@@ -0,0 +1,2 @@
|
||
+/coverage.txt
|
||
+/example/docs
|
||
diff --git a/vendor/github.com/docker/cli-docs-tool/.golangci.yml b/vendor/github.com/docker/cli-docs-tool/.golangci.yml
|
||
new file mode 100644
|
||
index 000000000000..6c6557176b28
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/docker/cli-docs-tool/.golangci.yml
|
||
@@ -0,0 +1,37 @@
|
||
+run:
|
||
+ timeout: 10m
|
||
+
|
||
+linters:
|
||
+ enable:
|
||
+ - deadcode
|
||
+ - depguard
|
||
+ - gofmt
|
||
+ - goimports
|
||
+ - revive
|
||
+ - govet
|
||
+ - importas
|
||
+ - ineffassign
|
||
+ - misspell
|
||
+ - typecheck
|
||
+ - varcheck
|
||
+ - errname
|
||
+ - makezero
|
||
+ - whitespace
|
||
+ disable-all: true
|
||
+
|
||
+linters-settings:
|
||
+ depguard:
|
||
+ list-type: blacklist
|
||
+ include-go-root: true
|
||
+ packages:
|
||
+ # The io/ioutil package has been deprecated.
|
||
+ # https://go.dev/doc/go1.16#ioutil
|
||
+ - io/ioutil
|
||
+ importas:
|
||
+ no-unaliased: true
|
||
+
|
||
+issues:
|
||
+ exclude-rules:
|
||
+ - linters:
|
||
+ - revive
|
||
+ text: "stutters"
|
||
diff --git a/vendor/github.com/docker/cli-docs-tool/Dockerfile b/vendor/github.com/docker/cli-docs-tool/Dockerfile
|
||
new file mode 100644
|
||
index 000000000000..f0e2739faa7c
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/docker/cli-docs-tool/Dockerfile
|
||
@@ -0,0 +1,86 @@
|
||
+# syntax=docker/dockerfile:1
|
||
+
|
||
+# Copyright 2021 cli-docs-tool authors
|
||
+#
|
||
+# Licensed under the Apache License, Version 2.0 (the "License");
|
||
+# you may not use this file except in compliance with the License.
|
||
+# You may obtain a copy of the License at
|
||
+#
|
||
+# http://www.apache.org/licenses/LICENSE-2.0
|
||
+#
|
||
+# Unless required by applicable law or agreed to in writing, software
|
||
+# distributed under the License is distributed on an "AS IS" BASIS,
|
||
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+# See the License for the specific language governing permissions and
|
||
+# limitations under the License.
|
||
+
|
||
+ARG GO_VERSION="1.18"
|
||
+ARG GOLANGCI_LINT_VERSION="v1.45"
|
||
+ARG ADDLICENSE_VERSION="v1.0.0"
|
||
+
|
||
+ARG LICENSE_ARGS="-c cli-docs-tool -l apache"
|
||
+ARG LICENSE_FILES=".*\(Dockerfile\|\.go\|\.hcl\|\.sh\)"
|
||
+
|
||
+FROM golangci/golangci-lint:${GOLANGCI_LINT_VERSION}-alpine AS golangci-lint
|
||
+FROM ghcr.io/google/addlicense:${ADDLICENSE_VERSION} AS addlicense
|
||
+
|
||
+FROM golang:${GO_VERSION}-alpine AS base
|
||
+RUN apk add --no-cache cpio findutils git linux-headers
|
||
+ENV CGO_ENABLED=0
|
||
+WORKDIR /src
|
||
+
|
||
+FROM base AS vendored
|
||
+RUN --mount=type=bind,target=.,rw \
|
||
+ --mount=type=cache,target=/go/pkg/mod \
|
||
+ go mod tidy && go mod download && \
|
||
+ mkdir /out && cp go.mod go.sum /out
|
||
+
|
||
+FROM scratch AS vendor-update
|
||
+COPY --from=vendored /out /
|
||
+
|
||
+FROM vendored AS vendor-validate
|
||
+RUN --mount=type=bind,target=.,rw <<EOT
|
||
+set -e
|
||
+git add -A
|
||
+cp -rf /out/* .
|
||
+diff=$(git status --porcelain -- go.mod go.sum)
|
||
+if [ -n "$diff" ]; then
|
||
+ echo >&2 'ERROR: Vendor result differs. Please vendor your package with "docker buildx bake vendor"'
|
||
+ echo "$diff"
|
||
+ exit 1
|
||
+fi
|
||
+EOT
|
||
+
|
||
+FROM base AS lint
|
||
+RUN --mount=type=bind,target=. \
|
||
+ --mount=type=cache,target=/root/.cache \
|
||
+ --mount=from=golangci-lint,source=/usr/bin/golangci-lint,target=/usr/bin/golangci-lint \
|
||
+ golangci-lint run ./...
|
||
+
|
||
+FROM base AS license-set
|
||
+ARG LICENSE_ARGS
|
||
+ARG LICENSE_FILES
|
||
+RUN --mount=type=bind,target=.,rw \
|
||
+ --mount=from=addlicense,source=/app/addlicense,target=/usr/bin/addlicense \
|
||
+ find . -regex "${LICENSE_FILES}" | xargs addlicense ${LICENSE_ARGS} \
|
||
+ && mkdir /out \
|
||
+ && find . -regex "${LICENSE_FILES}" | cpio -pdm /out
|
||
+
|
||
+FROM scratch AS license-update
|
||
+COPY --from=set /out /
|
||
+
|
||
+FROM base AS license-validate
|
||
+ARG LICENSE_ARGS
|
||
+ARG LICENSE_FILES
|
||
+RUN --mount=type=bind,target=. \
|
||
+ --mount=from=addlicense,source=/app/addlicense,target=/usr/bin/addlicense \
|
||
+ find . -regex "${LICENSE_FILES}" | xargs addlicense -check ${LICENSE_ARGS}
|
||
+
|
||
+FROM vendored AS test
|
||
+RUN --mount=type=bind,target=. \
|
||
+ --mount=type=cache,target=/root/.cache \
|
||
+ --mount=type=cache,target=/go/pkg/mod \
|
||
+ go test -v -coverprofile=/tmp/coverage.txt -covermode=atomic ./...
|
||
+
|
||
+FROM scratch AS test-coverage
|
||
+COPY --from=test /tmp/coverage.txt /coverage.txt
|
||
diff --git a/vendor/github.com/docker/cli-docs-tool/LICENSE b/vendor/github.com/docker/cli-docs-tool/LICENSE
|
||
new file mode 100644
|
||
index 000000000000..d64569567334
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/docker/cli-docs-tool/LICENSE
|
||
@@ -0,0 +1,202 @@
|
||
+
|
||
+ Apache License
|
||
+ Version 2.0, January 2004
|
||
+ http://www.apache.org/licenses/
|
||
+
|
||
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||
+
|
||
+ 1. Definitions.
|
||
+
|
||
+ "License" shall mean the terms and conditions for use, reproduction,
|
||
+ and distribution as defined by Sections 1 through 9 of this document.
|
||
+
|
||
+ "Licensor" shall mean the copyright owner or entity authorized by
|
||
+ the copyright owner that is granting the License.
|
||
+
|
||
+ "Legal Entity" shall mean the union of the acting entity and all
|
||
+ other entities that control, are controlled by, or are under common
|
||
+ control with that entity. For the purposes of this definition,
|
||
+ "control" means (i) the power, direct or indirect, to cause the
|
||
+ direction or management of such entity, whether by contract or
|
||
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||
+ outstanding shares, or (iii) beneficial ownership of such entity.
|
||
+
|
||
+ "You" (or "Your") shall mean an individual or Legal Entity
|
||
+ exercising permissions granted by this License.
|
||
+
|
||
+ "Source" form shall mean the preferred form for making modifications,
|
||
+ including but not limited to software source code, documentation
|
||
+ source, and configuration files.
|
||
+
|
||
+ "Object" form shall mean any form resulting from mechanical
|
||
+ transformation or translation of a Source form, including but
|
||
+ not limited to compiled object code, generated documentation,
|
||
+ and conversions to other media types.
|
||
+
|
||
+ "Work" shall mean the work of authorship, whether in Source or
|
||
+ Object form, made available under the License, as indicated by a
|
||
+ copyright notice that is included in or attached to the work
|
||
+ (an example is provided in the Appendix below).
|
||
+
|
||
+ "Derivative Works" shall mean any work, whether in Source or Object
|
||
+ form, that is based on (or derived from) the Work and for which the
|
||
+ editorial revisions, annotations, elaborations, or other modifications
|
||
+ represent, as a whole, an original work of authorship. For the purposes
|
||
+ of this License, Derivative Works shall not include works that remain
|
||
+ separable from, or merely link (or bind by name) to the interfaces of,
|
||
+ the Work and Derivative Works thereof.
|
||
+
|
||
+ "Contribution" shall mean any work of authorship, including
|
||
+ the original version of the Work and any modifications or additions
|
||
+ to that Work or Derivative Works thereof, that is intentionally
|
||
+ submitted to Licensor for inclusion in the Work by the copyright owner
|
||
+ or by an individual or Legal Entity authorized to submit on behalf of
|
||
+ the copyright owner. For the purposes of this definition, "submitted"
|
||
+ means any form of electronic, verbal, or written communication sent
|
||
+ to the Licensor or its representatives, including but not limited to
|
||
+ communication on electronic mailing lists, source code control systems,
|
||
+ and issue tracking systems that are managed by, or on behalf of, the
|
||
+ Licensor for the purpose of discussing and improving the Work, but
|
||
+ excluding communication that is conspicuously marked or otherwise
|
||
+ designated in writing by the copyright owner as "Not a Contribution."
|
||
+
|
||
+ "Contributor" shall mean Licensor and any individual or Legal Entity
|
||
+ on behalf of whom a Contribution has been received by Licensor and
|
||
+ subsequently incorporated within the Work.
|
||
+
|
||
+ 2. Grant of Copyright License. Subject to the terms and conditions of
|
||
+ this License, each Contributor hereby grants to You a perpetual,
|
||
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||
+ copyright license to reproduce, prepare Derivative Works of,
|
||
+ publicly display, publicly perform, sublicense, and distribute the
|
||
+ Work and such Derivative Works in Source or Object form.
|
||
+
|
||
+ 3. Grant of Patent License. Subject to the terms and conditions of
|
||
+ this License, each Contributor hereby grants to You a perpetual,
|
||
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||
+ (except as stated in this section) patent license to make, have made,
|
||
+ use, offer to sell, sell, import, and otherwise transfer the Work,
|
||
+ where such license applies only to those patent claims licensable
|
||
+ by such Contributor that are necessarily infringed by their
|
||
+ Contribution(s) alone or by combination of their Contribution(s)
|
||
+ with the Work to which such Contribution(s) was submitted. If You
|
||
+ institute patent litigation against any entity (including a
|
||
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||
+ or a Contribution incorporated within the Work constitutes direct
|
||
+ or contributory patent infringement, then any patent licenses
|
||
+ granted to You under this License for that Work shall terminate
|
||
+ as of the date such litigation is filed.
|
||
+
|
||
+ 4. Redistribution. You may reproduce and distribute copies of the
|
||
+ Work or Derivative Works thereof in any medium, with or without
|
||
+ modifications, and in Source or Object form, provided that You
|
||
+ meet the following conditions:
|
||
+
|
||
+ (a) You must give any other recipients of the Work or
|
||
+ Derivative Works a copy of this License; and
|
||
+
|
||
+ (b) You must cause any modified files to carry prominent notices
|
||
+ stating that You changed the files; and
|
||
+
|
||
+ (c) You must retain, in the Source form of any Derivative Works
|
||
+ that You distribute, all copyright, patent, trademark, and
|
||
+ attribution notices from the Source form of the Work,
|
||
+ excluding those notices that do not pertain to any part of
|
||
+ the Derivative Works; and
|
||
+
|
||
+ (d) If the Work includes a "NOTICE" text file as part of its
|
||
+ distribution, then any Derivative Works that You distribute must
|
||
+ include a readable copy of the attribution notices contained
|
||
+ within such NOTICE file, excluding those notices that do not
|
||
+ pertain to any part of the Derivative Works, in at least one
|
||
+ of the following places: within a NOTICE text file distributed
|
||
+ as part of the Derivative Works; within the Source form or
|
||
+ documentation, if provided along with the Derivative Works; or,
|
||
+ within a display generated by the Derivative Works, if and
|
||
+ wherever such third-party notices normally appear. The contents
|
||
+ of the NOTICE file are for informational purposes only and
|
||
+ do not modify the License. You may add Your own attribution
|
||
+ notices within Derivative Works that You distribute, alongside
|
||
+ or as an addendum to the NOTICE text from the Work, provided
|
||
+ that such additional attribution notices cannot be construed
|
||
+ as modifying the License.
|
||
+
|
||
+ You may add Your own copyright statement to Your modifications and
|
||
+ may provide additional or different license terms and conditions
|
||
+ for use, reproduction, or distribution of Your modifications, or
|
||
+ for any such Derivative Works as a whole, provided Your use,
|
||
+ reproduction, and distribution of the Work otherwise complies with
|
||
+ the conditions stated in this License.
|
||
+
|
||
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
|
||
+ any Contribution intentionally submitted for inclusion in the Work
|
||
+ by You to the Licensor shall be under the terms and conditions of
|
||
+ this License, without any additional terms or conditions.
|
||
+ Notwithstanding the above, nothing herein shall supersede or modify
|
||
+ the terms of any separate license agreement you may have executed
|
||
+ with Licensor regarding such Contributions.
|
||
+
|
||
+ 6. Trademarks. This License does not grant permission to use the trade
|
||
+ names, trademarks, service marks, or product names of the Licensor,
|
||
+ except as required for reasonable and customary use in describing the
|
||
+ origin of the Work and reproducing the content of the NOTICE file.
|
||
+
|
||
+ 7. Disclaimer of Warranty. Unless required by applicable law or
|
||
+ agreed to in writing, Licensor provides the Work (and each
|
||
+ Contributor provides its Contributions) on an "AS IS" BASIS,
|
||
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||
+ implied, including, without limitation, any warranties or conditions
|
||
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||
+ PARTICULAR PURPOSE. You are solely responsible for determining the
|
||
+ appropriateness of using or redistributing the Work and assume any
|
||
+ risks associated with Your exercise of permissions under this License.
|
||
+
|
||
+ 8. Limitation of Liability. In no event and under no legal theory,
|
||
+ whether in tort (including negligence), contract, or otherwise,
|
||
+ unless required by applicable law (such as deliberate and grossly
|
||
+ negligent acts) or agreed to in writing, shall any Contributor be
|
||
+ liable to You for damages, including any direct, indirect, special,
|
||
+ incidental, or consequential damages of any character arising as a
|
||
+ result of this License or out of the use or inability to use the
|
||
+ Work (including but not limited to damages for loss of goodwill,
|
||
+ work stoppage, computer failure or malfunction, or any and all
|
||
+ other commercial damages or losses), even if such Contributor
|
||
+ has been advised of the possibility of such damages.
|
||
+
|
||
+ 9. Accepting Warranty or Additional Liability. While redistributing
|
||
+ the Work or Derivative Works thereof, You may choose to offer,
|
||
+ and charge a fee for, acceptance of support, warranty, indemnity,
|
||
+ or other liability obligations and/or rights consistent with this
|
||
+ License. However, in accepting such obligations, You may act only
|
||
+ on Your own behalf and on Your sole responsibility, not on behalf
|
||
+ of any other Contributor, and only if You agree to indemnify,
|
||
+ defend, and hold each Contributor harmless for any liability
|
||
+ incurred by, or claims asserted against, such Contributor by reason
|
||
+ of your accepting any such warranty or additional liability.
|
||
+
|
||
+ END OF TERMS AND CONDITIONS
|
||
+
|
||
+ APPENDIX: How to apply the Apache License to your work.
|
||
+
|
||
+ To apply the Apache License to your work, attach the following
|
||
+ boilerplate notice, with the fields enclosed by brackets "[]"
|
||
+ replaced with your own identifying information. (Don't include
|
||
+ the brackets!) The text should be enclosed in the appropriate
|
||
+ comment syntax for the file format. We also recommend that a
|
||
+ file or class name and description of purpose be included on the
|
||
+ same "printed page" as the copyright notice for easier
|
||
+ identification within third-party archives.
|
||
+
|
||
+ Copyright [yyyy] [name of copyright owner]
|
||
+
|
||
+ Licensed under the Apache License, Version 2.0 (the "License");
|
||
+ you may not use this file except in compliance with the License.
|
||
+ You may obtain a copy of the License at
|
||
+
|
||
+ http://www.apache.org/licenses/LICENSE-2.0
|
||
+
|
||
+ Unless required by applicable law or agreed to in writing, software
|
||
+ distributed under the License is distributed on an "AS IS" BASIS,
|
||
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+ See the License for the specific language governing permissions and
|
||
+ limitations under the License.
|
||
diff --git a/vendor/github.com/docker/cli-docs-tool/README.md b/vendor/github.com/docker/cli-docs-tool/README.md
|
||
new file mode 100644
|
||
index 000000000000..4d5ee6474f8f
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/docker/cli-docs-tool/README.md
|
||
@@ -0,0 +1,67 @@
|
||
+[](https://pkg.go.dev/github.com/docker/cli-docs-tool)
|
||
+[](https://github.com/docker/cli-docs-tool/actions?query=workflow%3Atest)
|
||
+[](https://goreportcard.com/report/github.com/docker/cli-docs-tool)
|
||
+
|
||
+## About
|
||
+
|
||
+This is a library containing utilities to generate (reference) documentation
|
||
+for the [`docker` CLI](https://github.com/docker/cli) on [docs.docker.com](https://docs.docker.com/reference/).
|
||
+
|
||
+## Disclaimer
|
||
+
|
||
+This library is intended for use by Docker's CLIs, and is not intended to be a
|
||
+general-purpose utility. Various bits are hard-coded or make assumptions that
|
||
+are very specific to our use-case. Contributions are welcome, but we will not
|
||
+accept contributions to make this a general-purpose module.
|
||
+
|
||
+## Usage
|
||
+
|
||
+To generate the documentation it's recommended to do so using a Go submodule
|
||
+in your repository.
|
||
+
|
||
+We will use the example of `docker/buildx` and create a Go submodule in a
|
||
+`docs` folder (recommended):
|
||
+
|
||
+```console
|
||
+$ mkdir docs
|
||
+$ cd ./docs
|
||
+$ go mod init github.com/docker/buildx/docs
|
||
+$ go get github.com/docker/cli-docs-tool
|
||
+```
|
||
+
|
||
+Your `go.mod` should look like this:
|
||
+
|
||
+```text
|
||
+module github.com/docker/buildx/docs
|
||
+
|
||
+go 1.16
|
||
+
|
||
+require (
|
||
+ github.com/docker/cli-docs-tool v0.0.0
|
||
+)
|
||
+```
|
||
+
|
||
+Next, create a file named `main.go` inside that directory containing the
|
||
+following Go code from [`example/main.go`](example/main.go).
|
||
+
|
||
+Running this example should produce the following output:
|
||
+
|
||
+```console
|
||
+$ go run main.go
|
||
+INFO: Generating Markdown for "docker buildx bake"
|
||
+INFO: Generating Markdown for "docker buildx build"
|
||
+INFO: Generating Markdown for "docker buildx create"
|
||
+INFO: Generating Markdown for "docker buildx du"
|
||
+...
|
||
+INFO: Generating YAML for "docker buildx uninstall"
|
||
+INFO: Generating YAML for "docker buildx use"
|
||
+INFO: Generating YAML for "docker buildx version"
|
||
+INFO: Generating YAML for "docker buildx"
|
||
+```
|
||
+
|
||
+Generated docs will be available in the `./docs` folder of the project.
|
||
+
|
||
+## Contributing
|
||
+
|
||
+Want to contribute? Awesome! You can find information about contributing to
|
||
+this project in the [CONTRIBUTING.md](/.github/CONTRIBUTING.md)
|
||
diff --git a/vendor/github.com/docker/cli-docs-tool/annotation/annotation.go b/vendor/github.com/docker/cli-docs-tool/annotation/annotation.go
|
||
new file mode 100644
|
||
index 000000000000..021846af6e07
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/docker/cli-docs-tool/annotation/annotation.go
|
||
@@ -0,0 +1,25 @@
|
||
+// Copyright 2021 cli-docs-tool authors
|
||
+//
|
||
+// Licensed under the Apache License, Version 2.0 (the "License");
|
||
+// you may not use this file except in compliance with the License.
|
||
+// You may obtain a copy of the License at
|
||
+//
|
||
+// http://www.apache.org/licenses/LICENSE-2.0
|
||
+//
|
||
+// Unless required by applicable law or agreed to in writing, software
|
||
+// distributed under the License is distributed on an "AS IS" BASIS,
|
||
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+// See the License for the specific language governing permissions and
|
||
+// limitations under the License.
|
||
+
|
||
+package annotation
|
||
+
|
||
+const (
|
||
+ // ExternalURL specifies an external link annotation
|
||
+ ExternalURL = "docs.external.url"
|
||
+ // CodeDelimiter specifies the char that will be converted as code backtick.
|
||
+ // Can be used on cmd for inheritance or a specific flag.
|
||
+ CodeDelimiter = "docs.code-delimiter"
|
||
+ // DefaultValue specifies the default value for a flag.
|
||
+ DefaultValue = "docs.default-value"
|
||
+)
|
||
diff --git a/vendor/github.com/docker/cli-docs-tool/clidocstool.go b/vendor/github.com/docker/cli-docs-tool/clidocstool.go
|
||
new file mode 100644
|
||
index 000000000000..d4aeaba3f126
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/docker/cli-docs-tool/clidocstool.go
|
||
@@ -0,0 +1,123 @@
|
||
+// Copyright 2017 cli-docs-tool authors
|
||
+//
|
||
+// Licensed under the Apache License, Version 2.0 (the "License");
|
||
+// you may not use this file except in compliance with the License.
|
||
+// You may obtain a copy of the License at
|
||
+//
|
||
+// http://www.apache.org/licenses/LICENSE-2.0
|
||
+//
|
||
+// Unless required by applicable law or agreed to in writing, software
|
||
+// distributed under the License is distributed on an "AS IS" BASIS,
|
||
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+// See the License for the specific language governing permissions and
|
||
+// limitations under the License.
|
||
+
|
||
+package clidocstool
|
||
+
|
||
+import (
|
||
+ "errors"
|
||
+ "io"
|
||
+ "os"
|
||
+ "strings"
|
||
+
|
||
+ "github.com/spf13/cobra"
|
||
+)
|
||
+
|
||
+// Options defines options for cli-docs-tool
|
||
+type Options struct {
|
||
+ Root *cobra.Command
|
||
+ SourceDir string
|
||
+ TargetDir string
|
||
+ Plugin bool
|
||
+}
|
||
+
|
||
+// Client represents an active cli-docs-tool object
|
||
+type Client struct {
|
||
+ root *cobra.Command
|
||
+ source string
|
||
+ target string
|
||
+ plugin bool
|
||
+}
|
||
+
|
||
+// New initializes a new cli-docs-tool client
|
||
+func New(opts Options) (*Client, error) {
|
||
+ if opts.Root == nil {
|
||
+ return nil, errors.New("root cmd required")
|
||
+ }
|
||
+ if len(opts.SourceDir) == 0 {
|
||
+ return nil, errors.New("source dir required")
|
||
+ }
|
||
+ c := &Client{
|
||
+ root: opts.Root,
|
||
+ source: opts.SourceDir,
|
||
+ plugin: opts.Plugin,
|
||
+ }
|
||
+ if len(opts.TargetDir) == 0 {
|
||
+ c.target = c.source
|
||
+ } else {
|
||
+ c.target = opts.TargetDir
|
||
+ }
|
||
+ if err := os.MkdirAll(c.target, 0755); err != nil {
|
||
+ return nil, err
|
||
+ }
|
||
+ return c, nil
|
||
+}
|
||
+
|
||
+// GenAllTree creates all structured ref files for this command and
|
||
+// all descendants in the directory given.
|
||
+func (c *Client) GenAllTree() error {
|
||
+ var err error
|
||
+ if err = c.GenMarkdownTree(c.root); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ if err = c.GenYamlTree(c.root); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ return nil
|
||
+}
|
||
+
|
||
+func fileExists(f string) bool {
|
||
+ info, err := os.Stat(f)
|
||
+ if os.IsNotExist(err) {
|
||
+ return false
|
||
+ }
|
||
+ return !info.IsDir()
|
||
+}
|
||
+
|
||
+func copyFile(src string, dst string) error {
|
||
+ sf, err := os.Open(src)
|
||
+ if err != nil {
|
||
+ return err
|
||
+ }
|
||
+ defer sf.Close()
|
||
+ df, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY, 0o600)
|
||
+ if err != nil {
|
||
+ return err
|
||
+ }
|
||
+ defer df.Close()
|
||
+ _, err = io.Copy(df, sf)
|
||
+ return err
|
||
+}
|
||
+
|
||
+func getAliases(cmd *cobra.Command) []string {
|
||
+ if a := cmd.Annotations["aliases"]; a != "" {
|
||
+ aliases := strings.Split(a, ",")
|
||
+ for i := 0; i < len(aliases); i++ {
|
||
+ aliases[i] = strings.TrimSpace(aliases[i])
|
||
+ }
|
||
+ return aliases
|
||
+ }
|
||
+ if len(cmd.Aliases) == 0 {
|
||
+ return cmd.Aliases
|
||
+ }
|
||
+
|
||
+ var parentPath string
|
||
+ if cmd.HasParent() {
|
||
+ parentPath = cmd.Parent().CommandPath() + " "
|
||
+ }
|
||
+ aliases := []string{cmd.CommandPath()}
|
||
+ for _, a := range cmd.Aliases {
|
||
+ aliases = append(aliases, parentPath+a)
|
||
+ }
|
||
+ return aliases
|
||
+}
|
||
diff --git a/vendor/github.com/docker/cli-docs-tool/clidocstool_md.go b/vendor/github.com/docker/cli-docs-tool/clidocstool_md.go
|
||
new file mode 100644
|
||
index 000000000000..1dee58c06ca1
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/docker/cli-docs-tool/clidocstool_md.go
|
||
@@ -0,0 +1,280 @@
|
||
+// Copyright 2021 cli-docs-tool authors
|
||
+//
|
||
+// Licensed under the Apache License, Version 2.0 (the "License");
|
||
+// you may not use this file except in compliance with the License.
|
||
+// You may obtain a copy of the License at
|
||
+//
|
||
+// http://www.apache.org/licenses/LICENSE-2.0
|
||
+//
|
||
+// Unless required by applicable law or agreed to in writing, software
|
||
+// distributed under the License is distributed on an "AS IS" BASIS,
|
||
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+// See the License for the specific language governing permissions and
|
||
+// limitations under the License.
|
||
+
|
||
+package clidocstool
|
||
+
|
||
+import (
|
||
+ "bytes"
|
||
+ "fmt"
|
||
+ "log"
|
||
+ "os"
|
||
+ "path/filepath"
|
||
+ "regexp"
|
||
+ "strings"
|
||
+ "text/tabwriter"
|
||
+ "text/template"
|
||
+
|
||
+ "github.com/docker/cli-docs-tool/annotation"
|
||
+ "github.com/spf13/cobra"
|
||
+ "github.com/spf13/pflag"
|
||
+)
|
||
+
|
||
+var (
|
||
+ nlRegexp = regexp.MustCompile(`\r?\n`)
|
||
+ adjustSep = regexp.MustCompile(`\|:---(\s+)`)
|
||
+)
|
||
+
|
||
+// GenMarkdownTree will generate a markdown page for this command and all
|
||
+// descendants in the directory given.
|
||
+func (c *Client) GenMarkdownTree(cmd *cobra.Command) error {
|
||
+ for _, sc := range cmd.Commands() {
|
||
+ if err := c.GenMarkdownTree(sc); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // always disable the addition of [flags] to the usage
|
||
+ cmd.DisableFlagsInUseLine = true
|
||
+
|
||
+ // Skip the root command altogether, to prevent generating a useless
|
||
+ // md file for plugins.
|
||
+ if c.plugin && !cmd.HasParent() {
|
||
+ return nil
|
||
+ }
|
||
+
|
||
+ // Skip hidden command
|
||
+ if cmd.Hidden {
|
||
+ log.Printf("INFO: Skipping Markdown for %q (hidden command)", cmd.CommandPath())
|
||
+ return nil
|
||
+ }
|
||
+
|
||
+ log.Printf("INFO: Generating Markdown for %q", cmd.CommandPath())
|
||
+ mdFile := mdFilename(cmd)
|
||
+ sourcePath := filepath.Join(c.source, mdFile)
|
||
+ targetPath := filepath.Join(c.target, mdFile)
|
||
+
|
||
+ // check recursively to handle inherited annotations
|
||
+ for curr := cmd; curr != nil; curr = curr.Parent() {
|
||
+ if _, ok := cmd.Annotations[annotation.CodeDelimiter]; !ok {
|
||
+ if cd, cok := curr.Annotations[annotation.CodeDelimiter]; cok {
|
||
+ if cmd.Annotations == nil {
|
||
+ cmd.Annotations = map[string]string{}
|
||
+ }
|
||
+ cmd.Annotations[annotation.CodeDelimiter] = cd
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if !fileExists(sourcePath) {
|
||
+ var icBuf bytes.Buffer
|
||
+ icTpl, err := template.New("ic").Option("missingkey=error").Parse(`# {{ .Command }}
|
||
+
|
||
+<!---MARKER_GEN_START-->
|
||
+<!---MARKER_GEN_END-->
|
||
+
|
||
+`)
|
||
+ if err != nil {
|
||
+ return err
|
||
+ }
|
||
+ if err = icTpl.Execute(&icBuf, struct {
|
||
+ Command string
|
||
+ }{
|
||
+ Command: cmd.CommandPath(),
|
||
+ }); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ if err = os.WriteFile(targetPath, icBuf.Bytes(), 0644); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ } else if err := copyFile(sourcePath, targetPath); err != nil {
|
||
+ return err
|
||
+ }
|
||
+
|
||
+ content, err := os.ReadFile(targetPath)
|
||
+ if err != nil {
|
||
+ return err
|
||
+ }
|
||
+
|
||
+ cs := string(content)
|
||
+
|
||
+ start := strings.Index(cs, "<!---MARKER_GEN_START-->")
|
||
+ end := strings.Index(cs, "<!---MARKER_GEN_END-->")
|
||
+
|
||
+ if start == -1 {
|
||
+ return fmt.Errorf("no start marker in %s", mdFile)
|
||
+ }
|
||
+ if end == -1 {
|
||
+ return fmt.Errorf("no end marker in %s", mdFile)
|
||
+ }
|
||
+
|
||
+ out, err := mdCmdOutput(cmd, cs)
|
||
+ if err != nil {
|
||
+ return err
|
||
+ }
|
||
+ cont := cs[:start] + "<!---MARKER_GEN_START-->" + "\n" + out + "\n" + cs[end:]
|
||
+
|
||
+ fi, err := os.Stat(targetPath)
|
||
+ if err != nil {
|
||
+ return err
|
||
+ }
|
||
+ if err = os.WriteFile(targetPath, []byte(cont), fi.Mode()); err != nil {
|
||
+ return fmt.Errorf("failed to write %s: %w", targetPath, err)
|
||
+ }
|
||
+
|
||
+ return nil
|
||
+}
|
||
+
|
||
+func mdFilename(cmd *cobra.Command) string {
|
||
+ name := cmd.CommandPath()
|
||
+ if i := strings.Index(name, " "); i >= 0 {
|
||
+ name = name[i+1:]
|
||
+ }
|
||
+ return strings.ReplaceAll(name, " ", "_") + ".md"
|
||
+}
|
||
+
|
||
+func mdMakeLink(txt, link string, f *pflag.Flag, isAnchor bool) string {
|
||
+ link = "#" + link
|
||
+ annotations, ok := f.Annotations[annotation.ExternalURL]
|
||
+ if ok && len(annotations) > 0 {
|
||
+ link = annotations[0]
|
||
+ } else {
|
||
+ if !isAnchor {
|
||
+ return txt
|
||
+ }
|
||
+ }
|
||
+
|
||
+ return "[" + txt + "](" + link + ")"
|
||
+}
|
||
+
|
||
+type mdTable struct {
|
||
+ out *strings.Builder
|
||
+ tabWriter *tabwriter.Writer
|
||
+}
|
||
+
|
||
+func newMdTable(headers ...string) *mdTable {
|
||
+ w := &strings.Builder{}
|
||
+ t := &mdTable{
|
||
+ out: w,
|
||
+ // Using tabwriter.Debug, which uses "|" as separator instead of tabs,
|
||
+ // which is what we want. It's a bit of a hack, but does the job :)
|
||
+ tabWriter: tabwriter.NewWriter(w, 5, 5, 1, ' ', tabwriter.Debug),
|
||
+ }
|
||
+ t.addHeader(headers...)
|
||
+ return t
|
||
+}
|
||
+
|
||
+func (t *mdTable) addHeader(cols ...string) {
|
||
+ t.AddRow(cols...)
|
||
+ _, _ = t.tabWriter.Write([]byte("|" + strings.Repeat(":---\t", len(cols)) + "\n"))
|
||
+}
|
||
+
|
||
+func (t *mdTable) AddRow(cols ...string) {
|
||
+ for i := range cols {
|
||
+ cols[i] = mdEscapePipe(cols[i])
|
||
+ }
|
||
+ _, _ = t.tabWriter.Write([]byte("| " + strings.Join(cols, "\t ") + "\t\n"))
|
||
+}
|
||
+
|
||
+func (t *mdTable) String() string {
|
||
+ _ = t.tabWriter.Flush()
|
||
+ return adjustSep.ReplaceAllStringFunc(t.out.String()+"\n", func(in string) string {
|
||
+ return strings.ReplaceAll(in, " ", "-")
|
||
+ })
|
||
+}
|
||
+
|
||
+func mdCmdOutput(cmd *cobra.Command, old string) (string, error) {
|
||
+ b := &strings.Builder{}
|
||
+
|
||
+ desc := cmd.Short
|
||
+ if cmd.Long != "" {
|
||
+ desc = cmd.Long
|
||
+ }
|
||
+ if desc != "" {
|
||
+ b.WriteString(desc + "\n\n")
|
||
+ }
|
||
+
|
||
+ if aliases := getAliases(cmd); len(aliases) != 0 {
|
||
+ b.WriteString("### Aliases\n\n")
|
||
+ b.WriteString("`" + strings.Join(aliases, "`, `") + "`")
|
||
+ b.WriteString("\n\n")
|
||
+ }
|
||
+
|
||
+ if len(cmd.Commands()) != 0 {
|
||
+ b.WriteString("### Subcommands\n\n")
|
||
+ table := newMdTable("Name", "Description")
|
||
+ for _, c := range cmd.Commands() {
|
||
+ if c.Hidden {
|
||
+ continue
|
||
+ }
|
||
+ table.AddRow(fmt.Sprintf("[`%s`](%s)", c.Name(), mdFilename(c)), c.Short)
|
||
+ }
|
||
+ b.WriteString(table.String() + "\n")
|
||
+ }
|
||
+
|
||
+ // add inherited flags before checking for flags availability
|
||
+ cmd.Flags().AddFlagSet(cmd.InheritedFlags())
|
||
+
|
||
+ if cmd.Flags().HasAvailableFlags() {
|
||
+ b.WriteString("### Options\n\n")
|
||
+ table := newMdTable("Name", "Type", "Default", "Description")
|
||
+ cmd.Flags().VisitAll(func(f *pflag.Flag) {
|
||
+ if f.Hidden {
|
||
+ return
|
||
+ }
|
||
+ isLink := strings.Contains(old, "<a name=\""+f.Name+"\"></a>")
|
||
+ var name string
|
||
+ if f.Shorthand != "" {
|
||
+ name = mdMakeLink("`-"+f.Shorthand+"`", f.Name, f, isLink)
|
||
+ name += ", "
|
||
+ }
|
||
+ name += mdMakeLink("`--"+f.Name+"`", f.Name, f, isLink)
|
||
+
|
||
+ var ftype string
|
||
+ if f.Value.Type() != "bool" {
|
||
+ ftype = "`" + f.Value.Type() + "`"
|
||
+ }
|
||
+
|
||
+ var defval string
|
||
+ if v, ok := f.Annotations[annotation.DefaultValue]; ok && len(v) > 0 {
|
||
+ defval = v[0]
|
||
+ if cd, ok := f.Annotations[annotation.CodeDelimiter]; ok {
|
||
+ defval = strings.ReplaceAll(defval, cd[0], "`")
|
||
+ } else if cd, ok := cmd.Annotations[annotation.CodeDelimiter]; ok {
|
||
+ defval = strings.ReplaceAll(defval, cd, "`")
|
||
+ }
|
||
+ } else if f.DefValue != "" && (f.Value.Type() != "bool" && f.DefValue != "true") && f.DefValue != "[]" {
|
||
+ defval = "`" + f.DefValue + "`"
|
||
+ }
|
||
+
|
||
+ usage := f.Usage
|
||
+ if cd, ok := f.Annotations[annotation.CodeDelimiter]; ok {
|
||
+ usage = strings.ReplaceAll(usage, cd[0], "`")
|
||
+ } else if cd, ok := cmd.Annotations[annotation.CodeDelimiter]; ok {
|
||
+ usage = strings.ReplaceAll(usage, cd, "`")
|
||
+ }
|
||
+ table.AddRow(name, ftype, defval, mdReplaceNewline(usage))
|
||
+ })
|
||
+ b.WriteString(table.String())
|
||
+ }
|
||
+
|
||
+ return b.String(), nil
|
||
+}
|
||
+
|
||
+func mdEscapePipe(s string) string {
|
||
+ return strings.ReplaceAll(s, `|`, `\|`)
|
||
+}
|
||
+
|
||
+func mdReplaceNewline(s string) string {
|
||
+ return nlRegexp.ReplaceAllString(s, "<br>")
|
||
+}
|
||
diff --git a/vendor/github.com/docker/cli-docs-tool/clidocstool_yaml.go b/vendor/github.com/docker/cli-docs-tool/clidocstool_yaml.go
|
||
new file mode 100644
|
||
index 000000000000..523524297af4
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/docker/cli-docs-tool/clidocstool_yaml.go
|
||
@@ -0,0 +1,435 @@
|
||
+// Copyright 2017 cli-docs-tool authors
|
||
+//
|
||
+// Licensed under the Apache License, Version 2.0 (the "License");
|
||
+// you may not use this file except in compliance with the License.
|
||
+// You may obtain a copy of the License at
|
||
+//
|
||
+// http://www.apache.org/licenses/LICENSE-2.0
|
||
+//
|
||
+// Unless required by applicable law or agreed to in writing, software
|
||
+// distributed under the License is distributed on an "AS IS" BASIS,
|
||
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+// See the License for the specific language governing permissions and
|
||
+// limitations under the License.
|
||
+
|
||
+package clidocstool
|
||
+
|
||
+import (
|
||
+ "fmt"
|
||
+ "io"
|
||
+ "log"
|
||
+ "os"
|
||
+ "path/filepath"
|
||
+ "sort"
|
||
+ "strings"
|
||
+
|
||
+ "github.com/docker/cli-docs-tool/annotation"
|
||
+ "github.com/spf13/cobra"
|
||
+ "github.com/spf13/pflag"
|
||
+ "gopkg.in/yaml.v3"
|
||
+)
|
||
+
|
||
+type cmdOption struct {
|
||
+ Option string
|
||
+ Shorthand string `yaml:",omitempty"`
|
||
+ ValueType string `yaml:"value_type,omitempty"`
|
||
+ DefaultValue string `yaml:"default_value,omitempty"`
|
||
+ Description string `yaml:",omitempty"`
|
||
+ DetailsURL string `yaml:"details_url,omitempty"` // DetailsURL contains an anchor-id or link for more information on this flag
|
||
+ Deprecated bool
|
||
+ Hidden bool
|
||
+ MinAPIVersion string `yaml:"min_api_version,omitempty"`
|
||
+ Experimental bool
|
||
+ ExperimentalCLI bool
|
||
+ Kubernetes bool
|
||
+ Swarm bool
|
||
+ OSType string `yaml:"os_type,omitempty"`
|
||
+}
|
||
+
|
||
+type cmdDoc struct {
|
||
+ Name string `yaml:"command"`
|
||
+ SeeAlso []string `yaml:"parent,omitempty"`
|
||
+ Version string `yaml:"engine_version,omitempty"`
|
||
+ Aliases string `yaml:",omitempty"`
|
||
+ Short string `yaml:",omitempty"`
|
||
+ Long string `yaml:",omitempty"`
|
||
+ Usage string `yaml:",omitempty"`
|
||
+ Pname string `yaml:",omitempty"`
|
||
+ Plink string `yaml:",omitempty"`
|
||
+ Cname []string `yaml:",omitempty"`
|
||
+ Clink []string `yaml:",omitempty"`
|
||
+ Options []cmdOption `yaml:",omitempty"`
|
||
+ InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"`
|
||
+ Example string `yaml:"examples,omitempty"`
|
||
+ Deprecated bool
|
||
+ Hidden bool
|
||
+ MinAPIVersion string `yaml:"min_api_version,omitempty"`
|
||
+ Experimental bool
|
||
+ ExperimentalCLI bool
|
||
+ Kubernetes bool
|
||
+ Swarm bool
|
||
+ OSType string `yaml:"os_type,omitempty"`
|
||
+}
|
||
+
|
||
+// GenYamlTree creates yaml structured ref files for this command and all descendants
|
||
+// in the directory given. This function may not work
|
||
+// correctly if your command names have `-` in them. If you have `cmd` with two
|
||
+// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third`
|
||
+// it is undefined which help output will be in the file `cmd-sub-third.1`.
|
||
+func (c *Client) GenYamlTree(cmd *cobra.Command) error {
|
||
+ emptyStr := func(s string) string { return "" }
|
||
+ if err := c.loadLongDescription(cmd); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ return c.genYamlTreeCustom(cmd, emptyStr)
|
||
+}
|
||
+
|
||
+// genYamlTreeCustom creates yaml structured ref files.
|
||
+func (c *Client) genYamlTreeCustom(cmd *cobra.Command, filePrepender func(string) string) error {
|
||
+ for _, sc := range cmd.Commands() {
|
||
+ if !sc.Runnable() && !sc.HasAvailableSubCommands() {
|
||
+ // skip non-runnable commands without subcommands
|
||
+ // but *do* generate YAML for hidden and deprecated commands
|
||
+ // the YAML will have those included as metadata, so that the
|
||
+ // documentation repository can decide whether or not to present them
|
||
+ continue
|
||
+ }
|
||
+ if err := c.genYamlTreeCustom(sc, filePrepender); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // always disable the addition of [flags] to the usage
|
||
+ cmd.DisableFlagsInUseLine = true
|
||
+
|
||
+ // The "root" command used in the generator is just a "stub", and only has a
|
||
+ // list of subcommands, but not (e.g.) global options/flags. We should fix
|
||
+ // that, so that the YAML file for the docker "root" command contains the
|
||
+ // global flags.
|
||
+
|
||
+ // Skip the root command altogether, to prevent generating a useless
|
||
+ // YAML file for plugins.
|
||
+ if c.plugin && !cmd.HasParent() {
|
||
+ return nil
|
||
+ }
|
||
+
|
||
+ log.Printf("INFO: Generating YAML for %q", cmd.CommandPath())
|
||
+ basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".yaml"
|
||
+ target := filepath.Join(c.target, basename)
|
||
+ f, err := os.Create(target)
|
||
+ if err != nil {
|
||
+ return err
|
||
+ }
|
||
+ defer f.Close()
|
||
+
|
||
+ if _, err := io.WriteString(f, filePrepender(target)); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ return c.genYamlCustom(cmd, f)
|
||
+}
|
||
+
|
||
+// genYamlCustom creates custom yaml output.
|
||
+// nolint: gocyclo
|
||
+func (c *Client) genYamlCustom(cmd *cobra.Command, w io.Writer) error {
|
||
+ const (
|
||
+ // shortMaxWidth is the maximum width for the "Short" description before
|
||
+ // we force YAML to use multi-line syntax. The goal is to make the total
|
||
+ // width fit within 80 characters. This value is based on 80 characters
|
||
+ // minus the with of the field, colon, and whitespace ('short: ').
|
||
+ shortMaxWidth = 73
|
||
+
|
||
+ // longMaxWidth is the maximum width for the "Short" description before
|
||
+ // we force YAML to use multi-line syntax. The goal is to make the total
|
||
+ // width fit within 80 characters. This value is based on 80 characters
|
||
+ // minus the with of the field, colon, and whitespace ('long: ').
|
||
+ longMaxWidth = 74
|
||
+ )
|
||
+
|
||
+ // necessary to add inherited flags otherwise some
|
||
+ // fields are not properly declared like usage
|
||
+ cmd.Flags().AddFlagSet(cmd.InheritedFlags())
|
||
+
|
||
+ cliDoc := cmdDoc{
|
||
+ Name: cmd.CommandPath(),
|
||
+ Aliases: strings.Join(getAliases(cmd), ", "),
|
||
+ Short: forceMultiLine(cmd.Short, shortMaxWidth),
|
||
+ Long: forceMultiLine(cmd.Long, longMaxWidth),
|
||
+ Example: cmd.Example,
|
||
+ Deprecated: len(cmd.Deprecated) > 0,
|
||
+ Hidden: cmd.Hidden,
|
||
+ }
|
||
+
|
||
+ if len(cliDoc.Long) == 0 {
|
||
+ cliDoc.Long = cliDoc.Short
|
||
+ }
|
||
+
|
||
+ if cmd.Runnable() {
|
||
+ cliDoc.Usage = cmd.UseLine()
|
||
+ }
|
||
+
|
||
+ // check recursively to handle inherited annotations
|
||
+ for curr := cmd; curr != nil; curr = curr.Parent() {
|
||
+ if v, ok := curr.Annotations["version"]; ok && cliDoc.MinAPIVersion == "" {
|
||
+ cliDoc.MinAPIVersion = v
|
||
+ }
|
||
+ if _, ok := curr.Annotations["experimental"]; ok && !cliDoc.Experimental {
|
||
+ cliDoc.Experimental = true
|
||
+ }
|
||
+ if _, ok := curr.Annotations["experimentalCLI"]; ok && !cliDoc.ExperimentalCLI {
|
||
+ cliDoc.ExperimentalCLI = true
|
||
+ }
|
||
+ if _, ok := curr.Annotations["kubernetes"]; ok && !cliDoc.Kubernetes {
|
||
+ cliDoc.Kubernetes = true
|
||
+ }
|
||
+ if _, ok := curr.Annotations["swarm"]; ok && !cliDoc.Swarm {
|
||
+ cliDoc.Swarm = true
|
||
+ }
|
||
+ if o, ok := curr.Annotations["ostype"]; ok && cliDoc.OSType == "" {
|
||
+ cliDoc.OSType = o
|
||
+ }
|
||
+ if _, ok := cmd.Annotations[annotation.CodeDelimiter]; !ok {
|
||
+ if cd, cok := curr.Annotations[annotation.CodeDelimiter]; cok {
|
||
+ if cmd.Annotations == nil {
|
||
+ cmd.Annotations = map[string]string{}
|
||
+ }
|
||
+ cmd.Annotations[annotation.CodeDelimiter] = cd
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ anchors := make(map[string]struct{})
|
||
+ if a, ok := cmd.Annotations["anchors"]; ok && a != "" {
|
||
+ for _, anchor := range strings.Split(a, ",") {
|
||
+ anchors[anchor] = struct{}{}
|
||
+ }
|
||
+ }
|
||
+
|
||
+ flags := cmd.NonInheritedFlags()
|
||
+ if flags.HasFlags() {
|
||
+ cliDoc.Options = genFlagResult(cmd, flags, anchors)
|
||
+ }
|
||
+ flags = cmd.InheritedFlags()
|
||
+ if flags.HasFlags() {
|
||
+ cliDoc.InheritedOptions = genFlagResult(cmd, flags, anchors)
|
||
+ }
|
||
+
|
||
+ if hasSeeAlso(cmd) {
|
||
+ if cmd.HasParent() {
|
||
+ parent := cmd.Parent()
|
||
+ cliDoc.Pname = parent.CommandPath()
|
||
+ cliDoc.Plink = strings.Replace(cliDoc.Pname, " ", "_", -1) + ".yaml"
|
||
+ cmd.VisitParents(func(c *cobra.Command) {
|
||
+ if c.DisableAutoGenTag {
|
||
+ cmd.DisableAutoGenTag = c.DisableAutoGenTag
|
||
+ }
|
||
+ })
|
||
+ }
|
||
+
|
||
+ children := cmd.Commands()
|
||
+ sort.Sort(byName(children))
|
||
+
|
||
+ for _, child := range children {
|
||
+ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
|
||
+ continue
|
||
+ }
|
||
+ cliDoc.Cname = append(cliDoc.Cname, cliDoc.Name+" "+child.Name())
|
||
+ cliDoc.Clink = append(cliDoc.Clink, strings.Replace(cliDoc.Name+"_"+child.Name(), " ", "_", -1)+".yaml")
|
||
+ }
|
||
+ }
|
||
+
|
||
+ final, err := yaml.Marshal(&cliDoc)
|
||
+ if err != nil {
|
||
+ fmt.Println(err)
|
||
+ os.Exit(1)
|
||
+ }
|
||
+ if _, err := fmt.Fprintln(w, string(final)); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ return nil
|
||
+}
|
||
+
|
||
+func genFlagResult(cmd *cobra.Command, flags *pflag.FlagSet, anchors map[string]struct{}) []cmdOption {
|
||
+ var (
|
||
+ result []cmdOption
|
||
+ opt cmdOption
|
||
+ )
|
||
+
|
||
+ const (
|
||
+ // shortMaxWidth is the maximum width for the "Short" description before
|
||
+ // we force YAML to use multi-line syntax. The goal is to make the total
|
||
+ // width fit within 80 characters. This value is based on 80 characters
|
||
+ // minus the with of the field, colon, and whitespace (' default_value: ').
|
||
+ defaultValueMaxWidth = 64
|
||
+
|
||
+ // longMaxWidth is the maximum width for the "Short" description before
|
||
+ // we force YAML to use multi-line syntax. The goal is to make the total
|
||
+ // width fit within 80 characters. This value is based on 80 characters
|
||
+ // minus the with of the field, colon, and whitespace (' description: ').
|
||
+ descriptionMaxWidth = 66
|
||
+ )
|
||
+
|
||
+ flags.VisitAll(func(flag *pflag.Flag) {
|
||
+ opt = cmdOption{
|
||
+ Option: flag.Name,
|
||
+ ValueType: flag.Value.Type(),
|
||
+ Deprecated: len(flag.Deprecated) > 0,
|
||
+ Hidden: flag.Hidden,
|
||
+ }
|
||
+
|
||
+ var defval string
|
||
+ if v, ok := flag.Annotations[annotation.DefaultValue]; ok && len(v) > 0 {
|
||
+ defval = v[0]
|
||
+ if cd, ok := flag.Annotations[annotation.CodeDelimiter]; ok {
|
||
+ defval = strings.ReplaceAll(defval, cd[0], "`")
|
||
+ } else if cd, ok := cmd.Annotations[annotation.CodeDelimiter]; ok {
|
||
+ defval = strings.ReplaceAll(defval, cd, "`")
|
||
+ }
|
||
+ } else {
|
||
+ defval = flag.DefValue
|
||
+ }
|
||
+ opt.DefaultValue = forceMultiLine(defval, defaultValueMaxWidth)
|
||
+
|
||
+ usage := flag.Usage
|
||
+ if cd, ok := flag.Annotations[annotation.CodeDelimiter]; ok {
|
||
+ usage = strings.ReplaceAll(usage, cd[0], "`")
|
||
+ } else if cd, ok := cmd.Annotations[annotation.CodeDelimiter]; ok {
|
||
+ usage = strings.ReplaceAll(usage, cd, "`")
|
||
+ }
|
||
+ opt.Description = forceMultiLine(usage, descriptionMaxWidth)
|
||
+
|
||
+ if v, ok := flag.Annotations[annotation.ExternalURL]; ok && len(v) > 0 {
|
||
+ opt.DetailsURL = strings.TrimPrefix(v[0], "https://docs.docker.com")
|
||
+ } else if _, ok = anchors[flag.Name]; ok {
|
||
+ opt.DetailsURL = "#" + flag.Name
|
||
+ }
|
||
+
|
||
+ // Todo, when we mark a shorthand is deprecated, but specify an empty message.
|
||
+ // The flag.ShorthandDeprecated is empty as the shorthand is deprecated.
|
||
+ // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok.
|
||
+ if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 {
|
||
+ opt.Shorthand = flag.Shorthand
|
||
+ }
|
||
+ if _, ok := flag.Annotations["experimental"]; ok {
|
||
+ opt.Experimental = true
|
||
+ }
|
||
+ if _, ok := flag.Annotations["deprecated"]; ok {
|
||
+ opt.Deprecated = true
|
||
+ }
|
||
+ if v, ok := flag.Annotations["version"]; ok {
|
||
+ opt.MinAPIVersion = v[0]
|
||
+ }
|
||
+ if _, ok := flag.Annotations["experimentalCLI"]; ok {
|
||
+ opt.ExperimentalCLI = true
|
||
+ }
|
||
+ if _, ok := flag.Annotations["kubernetes"]; ok {
|
||
+ opt.Kubernetes = true
|
||
+ }
|
||
+ if _, ok := flag.Annotations["swarm"]; ok {
|
||
+ opt.Swarm = true
|
||
+ }
|
||
+
|
||
+ // Note that the annotation can have multiple ostypes set, however, multiple
|
||
+ // values are currently not used (and unlikely will).
|
||
+ //
|
||
+ // To simplify usage of the os_type property in the YAML, and for consistency
|
||
+ // with the same property for commands, we're only using the first ostype that's set.
|
||
+ if ostypes, ok := flag.Annotations["ostype"]; ok && len(opt.OSType) == 0 && len(ostypes) > 0 {
|
||
+ opt.OSType = ostypes[0]
|
||
+ }
|
||
+
|
||
+ result = append(result, opt)
|
||
+ })
|
||
+
|
||
+ return result
|
||
+}
|
||
+
|
||
+// forceMultiLine appends a newline (\n) to strings that are longer than max
|
||
+// to force the yaml lib to use block notation (https://yaml.org/spec/1.2/spec.html#Block)
|
||
+// instead of a single-line string with newlines and tabs encoded("string\nline1\nline2").
|
||
+//
|
||
+// This makes the generated YAML more readable, and easier to review changes.
|
||
+// max can be used to customize the width to keep the whole line < 80 chars.
|
||
+func forceMultiLine(s string, max int) string {
|
||
+ s = strings.TrimSpace(s)
|
||
+ if len(s) > max && !strings.Contains(s, "\n") {
|
||
+ s = s + "\n"
|
||
+ }
|
||
+ return s
|
||
+}
|
||
+
|
||
+// Small duplication for cobra utils
|
||
+func hasSeeAlso(cmd *cobra.Command) bool {
|
||
+ if cmd.HasParent() {
|
||
+ return true
|
||
+ }
|
||
+ for _, c := range cmd.Commands() {
|
||
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
|
||
+ continue
|
||
+ }
|
||
+ return true
|
||
+ }
|
||
+ return false
|
||
+}
|
||
+
|
||
+// loadLongDescription gets long descriptions and examples from markdown.
|
||
+func (c *Client) loadLongDescription(parentCmd *cobra.Command) error {
|
||
+ for _, cmd := range parentCmd.Commands() {
|
||
+ if cmd.HasSubCommands() {
|
||
+ if err := c.loadLongDescription(cmd); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ }
|
||
+ name := cmd.CommandPath()
|
||
+ if i := strings.Index(name, " "); i >= 0 {
|
||
+ // remove root command / binary name
|
||
+ name = name[i+1:]
|
||
+ }
|
||
+ if name == "" {
|
||
+ continue
|
||
+ }
|
||
+ mdFile := strings.ReplaceAll(name, " ", "_") + ".md"
|
||
+ sourcePath := filepath.Join(c.source, mdFile)
|
||
+ content, err := os.ReadFile(sourcePath)
|
||
+ if os.IsNotExist(err) {
|
||
+ log.Printf("WARN: %s does not exist, skipping Markdown examples for YAML doc\n", mdFile)
|
||
+ continue
|
||
+ }
|
||
+ if err != nil {
|
||
+ return err
|
||
+ }
|
||
+ applyDescriptionAndExamples(cmd, string(content))
|
||
+ }
|
||
+ return nil
|
||
+}
|
||
+
|
||
+// applyDescriptionAndExamples fills in cmd.Long and cmd.Example with the
|
||
+// "Description" and "Examples" H2 sections in mdString (if present).
|
||
+func applyDescriptionAndExamples(cmd *cobra.Command, mdString string) {
|
||
+ sections := getSections(mdString)
|
||
+ var (
|
||
+ anchors []string
|
||
+ md string
|
||
+ )
|
||
+ if sections["description"] != "" {
|
||
+ md, anchors = cleanupMarkDown(sections["description"])
|
||
+ cmd.Long = md
|
||
+ anchors = append(anchors, md)
|
||
+ }
|
||
+ if sections["examples"] != "" {
|
||
+ md, anchors = cleanupMarkDown(sections["examples"])
|
||
+ cmd.Example = md
|
||
+ anchors = append(anchors, md)
|
||
+ }
|
||
+ if len(anchors) > 0 {
|
||
+ if cmd.Annotations == nil {
|
||
+ cmd.Annotations = make(map[string]string)
|
||
+ }
|
||
+ cmd.Annotations["anchors"] = strings.Join(anchors, ",")
|
||
+ }
|
||
+}
|
||
+
|
||
+type byName []*cobra.Command
|
||
+
|
||
+func (s byName) Len() int { return len(s) }
|
||
+func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||
+func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
|
||
diff --git a/vendor/github.com/docker/cli-docs-tool/docker-bake.hcl b/vendor/github.com/docker/cli-docs-tool/docker-bake.hcl
|
||
new file mode 100644
|
||
index 000000000000..4a5f44f83018
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/docker/cli-docs-tool/docker-bake.hcl
|
||
@@ -0,0 +1,51 @@
|
||
+// Copyright 2021 cli-docs-tool authors
|
||
+//
|
||
+// Licensed under the Apache License, Version 2.0 (the "License");
|
||
+// you may not use this file except in compliance with the License.
|
||
+// You may obtain a copy of the License at
|
||
+//
|
||
+// http://www.apache.org/licenses/LICENSE-2.0
|
||
+//
|
||
+// Unless required by applicable law or agreed to in writing, software
|
||
+// distributed under the License is distributed on an "AS IS" BASIS,
|
||
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+// See the License for the specific language governing permissions and
|
||
+// limitations under the License.
|
||
+
|
||
+group "default" {
|
||
+ targets = ["test"]
|
||
+}
|
||
+
|
||
+group "validate" {
|
||
+ targets = ["lint", "vendor-validate", "license-validate"]
|
||
+}
|
||
+
|
||
+target "lint" {
|
||
+ target = "lint"
|
||
+ output = ["type=cacheonly"]
|
||
+}
|
||
+
|
||
+target "vendor-validate" {
|
||
+ target = "vendor-validate"
|
||
+ output = ["type=cacheonly"]
|
||
+}
|
||
+
|
||
+target "vendor-update" {
|
||
+ target = "vendor-update"
|
||
+ output = ["."]
|
||
+}
|
||
+
|
||
+target "test" {
|
||
+ target = "test-coverage"
|
||
+ output = ["."]
|
||
+}
|
||
+
|
||
+target "license-validate" {
|
||
+ target = "license-validate"
|
||
+ output = ["type=cacheonly"]
|
||
+}
|
||
+
|
||
+target "license-update" {
|
||
+ target = "license-update"
|
||
+ output = ["."]
|
||
+}
|
||
diff --git a/vendor/github.com/docker/cli-docs-tool/markdown.go b/vendor/github.com/docker/cli-docs-tool/markdown.go
|
||
new file mode 100644
|
||
index 000000000000..32849236ed9c
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/docker/cli-docs-tool/markdown.go
|
||
@@ -0,0 +1,87 @@
|
||
+// Copyright 2017 cli-docs-tool authors
|
||
+//
|
||
+// Licensed under the Apache License, Version 2.0 (the "License");
|
||
+// you may not use this file except in compliance with the License.
|
||
+// You may obtain a copy of the License at
|
||
+//
|
||
+// http://www.apache.org/licenses/LICENSE-2.0
|
||
+//
|
||
+// Unless required by applicable law or agreed to in writing, software
|
||
+// distributed under the License is distributed on an "AS IS" BASIS,
|
||
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+// See the License for the specific language governing permissions and
|
||
+// limitations under the License.
|
||
+
|
||
+package clidocstool
|
||
+
|
||
+import (
|
||
+ "regexp"
|
||
+ "strings"
|
||
+ "unicode"
|
||
+)
|
||
+
|
||
+var (
|
||
+ // mdHeading matches MarkDown H1..h6 headings. Note that this regex may produce
|
||
+ // false positives for (e.g.) comments in code-blocks (# this is a comment),
|
||
+ // so should not be used as a generic regex for other purposes.
|
||
+ mdHeading = regexp.MustCompile(`^([#]{1,6})\s(.*)$`)
|
||
+ // htmlAnchor matches inline HTML anchors. This is intended to only match anchors
|
||
+ // for our use-case; DO NOT consider using this as a generic regex, or at least
|
||
+ // not before reading https://stackoverflow.com/a/1732454/1811501.
|
||
+ htmlAnchor = regexp.MustCompile(`<a\s+(?:name|id)="?([^"]+)"?\s*></a>\s*`)
|
||
+)
|
||
+
|
||
+// getSections returns all H2 sections by title (lowercase)
|
||
+func getSections(mdString string) map[string]string {
|
||
+ parsedContent := strings.Split("\n"+mdString, "\n## ")
|
||
+ sections := make(map[string]string, len(parsedContent))
|
||
+ for _, s := range parsedContent {
|
||
+ if strings.HasPrefix(s, "#") {
|
||
+ // not a H2 Section
|
||
+ continue
|
||
+ }
|
||
+ parts := strings.SplitN(s, "\n", 2)
|
||
+ if len(parts) == 2 {
|
||
+ sections[strings.ToLower(parts[0])] = parts[1]
|
||
+ }
|
||
+ }
|
||
+ return sections
|
||
+}
|
||
+
|
||
+// cleanupMarkDown cleans up the MarkDown passed in mdString for inclusion in
|
||
+// YAML. It removes trailing whitespace and substitutes tabs for four spaces
|
||
+// to prevent YAML switching to use "compact" form; ("line1 \nline\t2\n")
|
||
+// which, although equivalent, is hard to read.
|
||
+func cleanupMarkDown(mdString string) (md string, anchors []string) {
|
||
+ // remove leading/trailing whitespace, and replace tabs in the whole content
|
||
+ mdString = strings.TrimSpace(mdString)
|
||
+ mdString = strings.ReplaceAll(mdString, "\t", " ")
|
||
+ mdString = strings.ReplaceAll(mdString, "https://docs.docker.com", "")
|
||
+
|
||
+ var id string
|
||
+ // replace trailing whitespace per line, and handle custom anchors
|
||
+ lines := strings.Split(mdString, "\n")
|
||
+ for i := 0; i < len(lines); i++ {
|
||
+ lines[i] = strings.TrimRightFunc(lines[i], unicode.IsSpace)
|
||
+ lines[i], id = convertHTMLAnchor(lines[i])
|
||
+ if id != "" {
|
||
+ anchors = append(anchors, id)
|
||
+ }
|
||
+ }
|
||
+ return strings.Join(lines, "\n"), anchors
|
||
+}
|
||
+
|
||
+// convertHTMLAnchor converts inline anchor-tags in headings (<a name=myanchor></a>)
|
||
+// to an extended-markdown property ({#myanchor}). Extended Markdown properties
|
||
+// are not supported in GitHub Flavored Markdown, but are supported by Jekyll,
|
||
+// and lead to cleaner HTML in our docs, and prevents duplicate anchors.
|
||
+// It returns the converted MarkDown heading and the custom ID (if present)
|
||
+func convertHTMLAnchor(mdLine string) (md string, customID string) {
|
||
+ if m := mdHeading.FindStringSubmatch(mdLine); len(m) > 0 {
|
||
+ if a := htmlAnchor.FindStringSubmatch(m[2]); len(a) > 0 {
|
||
+ customID = a[1]
|
||
+ mdLine = m[1] + " " + htmlAnchor.ReplaceAllString(m[2], "") + " {#" + customID + "}"
|
||
+ }
|
||
+ }
|
||
+ return mdLine, customID
|
||
+}
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/.gitignore b/vendor/github.com/russross/blackfriday/v2/.gitignore
|
||
new file mode 100644
|
||
index 000000000000..75623dcccbb7
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/russross/blackfriday/v2/.gitignore
|
||
@@ -0,0 +1,8 @@
|
||
+*.out
|
||
+*.swp
|
||
+*.8
|
||
+*.6
|
||
+_obj
|
||
+_test*
|
||
+markdown
|
||
+tags
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/.travis.yml b/vendor/github.com/russross/blackfriday/v2/.travis.yml
|
||
new file mode 100644
|
||
index 000000000000..b0b525a5a8e1
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/russross/blackfriday/v2/.travis.yml
|
||
@@ -0,0 +1,17 @@
|
||
+sudo: false
|
||
+language: go
|
||
+go:
|
||
+ - "1.10.x"
|
||
+ - "1.11.x"
|
||
+ - tip
|
||
+matrix:
|
||
+ fast_finish: true
|
||
+ allow_failures:
|
||
+ - go: tip
|
||
+install:
|
||
+ - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
|
||
+script:
|
||
+ - go get -t -v ./...
|
||
+ - diff -u <(echo -n) <(gofmt -d -s .)
|
||
+ - go tool vet .
|
||
+ - go test -v ./...
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt
|
||
new file mode 100644
|
||
index 000000000000..2885af3602d8
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt
|
||
@@ -0,0 +1,29 @@
|
||
+Blackfriday is distributed under the Simplified BSD License:
|
||
+
|
||
+> Copyright © 2011 Russ Ross
|
||
+> All rights reserved.
|
||
+>
|
||
+> Redistribution and use in source and binary forms, with or without
|
||
+> modification, are permitted provided that the following conditions
|
||
+> are met:
|
||
+>
|
||
+> 1. Redistributions of source code must retain the above copyright
|
||
+> notice, this list of conditions and the following disclaimer.
|
||
+>
|
||
+> 2. Redistributions in binary form must reproduce the above
|
||
+> copyright notice, this list of conditions and the following
|
||
+> disclaimer in the documentation and/or other materials provided with
|
||
+> the distribution.
|
||
+>
|
||
+> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||
+> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||
+> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
||
+> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
||
+> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||
+> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
||
+> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||
+> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||
+> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||
+> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
|
||
+> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||
+> POSSIBILITY OF SUCH DAMAGE.
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md
|
||
new file mode 100644
|
||
index 000000000000..d9c08a22fc54
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/russross/blackfriday/v2/README.md
|
||
@@ -0,0 +1,335 @@
|
||
+Blackfriday
|
||
+[![Build Status][BuildV2SVG]][BuildV2URL]
|
||
+[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL]
|
||
+===========
|
||
+
|
||
+Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
|
||
+is paranoid about its input (so you can safely feed it user-supplied
|
||
+data), it is fast, it supports common extensions (tables, smart
|
||
+punctuation substitutions, etc.), and it is safe for all utf-8
|
||
+(unicode) input.
|
||
+
|
||
+HTML output is currently supported, along with Smartypants
|
||
+extensions.
|
||
+
|
||
+It started as a translation from C of [Sundown][3].
|
||
+
|
||
+
|
||
+Installation
|
||
+------------
|
||
+
|
||
+Blackfriday is compatible with modern Go releases in module mode.
|
||
+With Go installed:
|
||
+
|
||
+ go get github.com/russross/blackfriday/v2
|
||
+
|
||
+will resolve and add the package to the current development module,
|
||
+then build and install it. Alternatively, you can achieve the same
|
||
+if you import it in a package:
|
||
+
|
||
+ import "github.com/russross/blackfriday/v2"
|
||
+
|
||
+and `go get` without parameters.
|
||
+
|
||
+Legacy GOPATH mode is unsupported.
|
||
+
|
||
+
|
||
+Versions
|
||
+--------
|
||
+
|
||
+Currently maintained and recommended version of Blackfriday is `v2`. It's being
|
||
+developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the
|
||
+documentation is available at
|
||
+https://pkg.go.dev/github.com/russross/blackfriday/v2.
|
||
+
|
||
+It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`.
|
||
+
|
||
+Version 2 offers a number of improvements over v1:
|
||
+
|
||
+* Cleaned up API
|
||
+* A separate call to [`Parse`][4], which produces an abstract syntax tree for
|
||
+ the document
|
||
+* Latest bug fixes
|
||
+* Flexibility to easily add your own rendering extensions
|
||
+
|
||
+Potential drawbacks:
|
||
+
|
||
+* Our benchmarks show v2 to be slightly slower than v1. Currently in the
|
||
+ ballpark of around 15%.
|
||
+* API breakage. If you can't afford modifying your code to adhere to the new API
|
||
+ and don't care too much about the new features, v2 is probably not for you.
|
||
+* Several bug fixes are trailing behind and still need to be forward-ported to
|
||
+ v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
|
||
+ tracking.
|
||
+
|
||
+If you are still interested in the legacy `v1`, you can import it from
|
||
+`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found
|
||
+here: https://pkg.go.dev/github.com/russross/blackfriday.
|
||
+
|
||
+
|
||
+Usage
|
||
+-----
|
||
+
|
||
+For the most sensible markdown processing, it is as simple as getting your input
|
||
+into a byte slice and calling:
|
||
+
|
||
+```go
|
||
+output := blackfriday.Run(input)
|
||
+```
|
||
+
|
||
+Your input will be parsed and the output rendered with a set of most popular
|
||
+extensions enabled. If you want the most basic feature set, corresponding with
|
||
+the bare Markdown specification, use:
|
||
+
|
||
+```go
|
||
+output := blackfriday.Run(input, blackfriday.WithNoExtensions())
|
||
+```
|
||
+
|
||
+### Sanitize untrusted content
|
||
+
|
||
+Blackfriday itself does nothing to protect against malicious content. If you are
|
||
+dealing with user-supplied markdown, we recommend running Blackfriday's output
|
||
+through HTML sanitizer such as [Bluemonday][5].
|
||
+
|
||
+Here's an example of simple usage of Blackfriday together with Bluemonday:
|
||
+
|
||
+```go
|
||
+import (
|
||
+ "github.com/microcosm-cc/bluemonday"
|
||
+ "github.com/russross/blackfriday/v2"
|
||
+)
|
||
+
|
||
+// ...
|
||
+unsafe := blackfriday.Run(input)
|
||
+html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
|
||
+```
|
||
+
|
||
+### Custom options
|
||
+
|
||
+If you want to customize the set of options, use `blackfriday.WithExtensions`,
|
||
+`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`.
|
||
+
|
||
+### `blackfriday-tool`
|
||
+
|
||
+You can also check out `blackfriday-tool` for a more complete example
|
||
+of how to use it. Download and install it using:
|
||
+
|
||
+ go get github.com/russross/blackfriday-tool
|
||
+
|
||
+This is a simple command-line tool that allows you to process a
|
||
+markdown file using a standalone program. You can also browse the
|
||
+source directly on github if you are just looking for some example
|
||
+code:
|
||
+
|
||
+* <https://github.com/russross/blackfriday-tool>
|
||
+
|
||
+Note that if you have not already done so, installing
|
||
+`blackfriday-tool` will be sufficient to download and install
|
||
+blackfriday in addition to the tool itself. The tool binary will be
|
||
+installed in `$GOPATH/bin`. This is a statically-linked binary that
|
||
+can be copied to wherever you need it without worrying about
|
||
+dependencies and library versions.
|
||
+
|
||
+### Sanitized anchor names
|
||
+
|
||
+Blackfriday includes an algorithm for creating sanitized anchor names
|
||
+corresponding to a given input text. This algorithm is used to create
|
||
+anchors for headings when `AutoHeadingIDs` extension is enabled. The
|
||
+algorithm has a specification, so that other packages can create
|
||
+compatible anchor names and links to those anchors.
|
||
+
|
||
+The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names.
|
||
+
|
||
+[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to
|
||
+create compatible links to the anchor names generated by blackfriday.
|
||
+This algorithm is also implemented in a small standalone package at
|
||
+[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients
|
||
+that want a small package and don't need full functionality of blackfriday.
|
||
+
|
||
+
|
||
+Features
|
||
+--------
|
||
+
|
||
+All features of Sundown are supported, including:
|
||
+
|
||
+* **Compatibility**. The Markdown v1.0.3 test suite passes with
|
||
+ the `--tidy` option. Without `--tidy`, the differences are
|
||
+ mostly in whitespace and entity escaping, where blackfriday is
|
||
+ more consistent and cleaner.
|
||
+
|
||
+* **Common extensions**, including table support, fenced code
|
||
+ blocks, autolinks, strikethroughs, non-strict emphasis, etc.
|
||
+
|
||
+* **Safety**. Blackfriday is paranoid when parsing, making it safe
|
||
+ to feed untrusted user input without fear of bad things
|
||
+ happening. The test suite stress tests this and there are no
|
||
+ known inputs that make it crash. If you find one, please let me
|
||
+ know and send me the input that does it.
|
||
+
|
||
+ NOTE: "safety" in this context means *runtime safety only*. In order to
|
||
+ protect yourself against JavaScript injection in untrusted content, see
|
||
+ [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
|
||
+
|
||
+* **Fast processing**. It is fast enough to render on-demand in
|
||
+ most web applications without having to cache the output.
|
||
+
|
||
+* **Thread safety**. You can run multiple parsers in different
|
||
+ goroutines without ill effect. There is no dependence on global
|
||
+ shared state.
|
||
+
|
||
+* **Minimal dependencies**. Blackfriday only depends on standard
|
||
+ library packages in Go. The source code is pretty
|
||
+ self-contained, so it is easy to add to any project, including
|
||
+ Google App Engine projects.
|
||
+
|
||
+* **Standards compliant**. Output successfully validates using the
|
||
+ W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
|
||
+
|
||
+
|
||
+Extensions
|
||
+----------
|
||
+
|
||
+In addition to the standard markdown syntax, this package
|
||
+implements the following extensions:
|
||
+
|
||
+* **Intra-word emphasis supression**. The `_` character is
|
||
+ commonly used inside words when discussing code, so having
|
||
+ markdown interpret it as an emphasis command is usually the
|
||
+ wrong thing. Blackfriday lets you treat all emphasis markers as
|
||
+ normal characters when they occur inside a word.
|
||
+
|
||
+* **Tables**. Tables can be created by drawing them in the input
|
||
+ using a simple syntax:
|
||
+
|
||
+ ```
|
||
+ Name | Age
|
||
+ --------|------
|
||
+ Bob | 27
|
||
+ Alice | 23
|
||
+ ```
|
||
+
|
||
+* **Fenced code blocks**. In addition to the normal 4-space
|
||
+ indentation to mark code blocks, you can explicitly mark them
|
||
+ and supply a language (to make syntax highlighting simple). Just
|
||
+ mark it like this:
|
||
+
|
||
+ ```go
|
||
+ func getTrue() bool {
|
||
+ return true
|
||
+ }
|
||
+ ```
|
||
+
|
||
+ You can use 3 or more backticks to mark the beginning of the
|
||
+ block, and the same number to mark the end of the block.
|
||
+
|
||
+ To preserve classes of fenced code blocks while using the bluemonday
|
||
+ HTML sanitizer, use the following policy:
|
||
+
|
||
+ ```go
|
||
+ p := bluemonday.UGCPolicy()
|
||
+ p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code")
|
||
+ html := p.SanitizeBytes(unsafe)
|
||
+ ```
|
||
+
|
||
+* **Definition lists**. A simple definition list is made of a single-line
|
||
+ term followed by a colon and the definition for that term.
|
||
+
|
||
+ Cat
|
||
+ : Fluffy animal everyone likes
|
||
+
|
||
+ Internet
|
||
+ : Vector of transmission for pictures of cats
|
||
+
|
||
+ Terms must be separated from the previous definition by a blank line.
|
||
+
|
||
+* **Footnotes**. A marker in the text that will become a superscript number;
|
||
+ a footnote definition that will be placed in a list of footnotes at the
|
||
+ end of the document. A footnote looks like this:
|
||
+
|
||
+ This is a footnote.[^1]
|
||
+
|
||
+ [^1]: the footnote text.
|
||
+
|
||
+* **Autolinking**. Blackfriday can find URLs that have not been
|
||
+ explicitly marked as links and turn them into links.
|
||
+
|
||
+* **Strikethrough**. Use two tildes (`~~`) to mark text that
|
||
+ should be crossed out.
|
||
+
|
||
+* **Hard line breaks**. With this extension enabled newlines in the input
|
||
+ translate into line breaks in the output. This extension is off by default.
|
||
+
|
||
+* **Smart quotes**. Smartypants-style punctuation substitution is
|
||
+ supported, turning normal double- and single-quote marks into
|
||
+ curly quotes, etc.
|
||
+
|
||
+* **LaTeX-style dash parsing** is an additional option, where `--`
|
||
+ is translated into `–`, and `---` is translated into
|
||
+ `—`. This differs from most smartypants processors, which
|
||
+ turn a single hyphen into an ndash and a double hyphen into an
|
||
+ mdash.
|
||
+
|
||
+* **Smart fractions**, where anything that looks like a fraction
|
||
+ is translated into suitable HTML (instead of just a few special
|
||
+ cases like most smartypant processors). For example, `4/5`
|
||
+ becomes `<sup>4</sup>⁄<sub>5</sub>`, which renders as
|
||
+ <sup>4</sup>⁄<sub>5</sub>.
|
||
+
|
||
+
|
||
+Other renderers
|
||
+---------------
|
||
+
|
||
+Blackfriday is structured to allow alternative rendering engines. Here
|
||
+are a few of note:
|
||
+
|
||
+* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown):
|
||
+ provides a GitHub Flavored Markdown renderer with fenced code block
|
||
+ highlighting, clickable heading anchor links.
|
||
+
|
||
+ It's not customizable, and its goal is to produce HTML output
|
||
+ equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
|
||
+ except the rendering is performed locally.
|
||
+
|
||
+* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
|
||
+ but for markdown.
|
||
+
|
||
+* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex):
|
||
+ renders output as LaTeX.
|
||
+
|
||
+* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience
|
||
+ integration with the [Chroma](https://github.com/alecthomas/chroma) code
|
||
+ highlighting library. bfchroma is only compatible with v2 of Blackfriday and
|
||
+ provides a drop-in renderer ready to use with Blackfriday, as well as
|
||
+ options and means for further customization.
|
||
+
|
||
+* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer.
|
||
+
|
||
+* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style
|
||
+
|
||
+
|
||
+TODO
|
||
+----
|
||
+
|
||
+* More unit testing
|
||
+* Improve Unicode support. It does not understand all Unicode
|
||
+ rules (about what constitutes a letter, a punctuation symbol,
|
||
+ etc.), so it may fail to detect word boundaries correctly in
|
||
+ some instances. It is safe on all UTF-8 input.
|
||
+
|
||
+
|
||
+License
|
||
+-------
|
||
+
|
||
+[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
|
||
+
|
||
+
|
||
+ [1]: https://daringfireball.net/projects/markdown/ "Markdown"
|
||
+ [2]: https://golang.org/ "Go Language"
|
||
+ [3]: https://github.com/vmg/sundown "Sundown"
|
||
+ [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func"
|
||
+ [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
|
||
+
|
||
+ [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2
|
||
+ [BuildV2URL]: https://travis-ci.org/russross/blackfriday
|
||
+ [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2
|
||
+ [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go
|
||
new file mode 100644
|
||
index 000000000000..dcd61e6e35bc
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/russross/blackfriday/v2/block.go
|
||
@@ -0,0 +1,1612 @@
|
||
+//
|
||
+// Blackfriday Markdown Processor
|
||
+// Available at http://github.com/russross/blackfriday
|
||
+//
|
||
+// Copyright © 2011 Russ Ross <russ@russross.com>.
|
||
+// Distributed under the Simplified BSD License.
|
||
+// See README.md for details.
|
||
+//
|
||
+
|
||
+//
|
||
+// Functions to parse block-level elements.
|
||
+//
|
||
+
|
||
+package blackfriday
|
||
+
|
||
+import (
|
||
+ "bytes"
|
||
+ "html"
|
||
+ "regexp"
|
||
+ "strings"
|
||
+ "unicode"
|
||
+)
|
||
+
|
||
+const (
|
||
+ charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});"
|
||
+ escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]"
|
||
+)
|
||
+
|
||
+var (
|
||
+ reBackslashOrAmp = regexp.MustCompile("[\\&]")
|
||
+ reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity)
|
||
+)
|
||
+
|
||
+// Parse block-level data.
|
||
+// Note: this function and many that it calls assume that
|
||
+// the input buffer ends with a newline.
|
||
+func (p *Markdown) block(data []byte) {
|
||
+ // this is called recursively: enforce a maximum depth
|
||
+ if p.nesting >= p.maxNesting {
|
||
+ return
|
||
+ }
|
||
+ p.nesting++
|
||
+
|
||
+ // parse out one block-level construct at a time
|
||
+ for len(data) > 0 {
|
||
+ // prefixed heading:
|
||
+ //
|
||
+ // # Heading 1
|
||
+ // ## Heading 2
|
||
+ // ...
|
||
+ // ###### Heading 6
|
||
+ if p.isPrefixHeading(data) {
|
||
+ data = data[p.prefixHeading(data):]
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // block of preformatted HTML:
|
||
+ //
|
||
+ // <div>
|
||
+ // ...
|
||
+ // </div>
|
||
+ if data[0] == '<' {
|
||
+ if i := p.html(data, true); i > 0 {
|
||
+ data = data[i:]
|
||
+ continue
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // title block
|
||
+ //
|
||
+ // % stuff
|
||
+ // % more stuff
|
||
+ // % even more stuff
|
||
+ if p.extensions&Titleblock != 0 {
|
||
+ if data[0] == '%' {
|
||
+ if i := p.titleBlock(data, true); i > 0 {
|
||
+ data = data[i:]
|
||
+ continue
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // blank lines. note: returns the # of bytes to skip
|
||
+ if i := p.isEmpty(data); i > 0 {
|
||
+ data = data[i:]
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // indented code block:
|
||
+ //
|
||
+ // func max(a, b int) int {
|
||
+ // if a > b {
|
||
+ // return a
|
||
+ // }
|
||
+ // return b
|
||
+ // }
|
||
+ if p.codePrefix(data) > 0 {
|
||
+ data = data[p.code(data):]
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // fenced code block:
|
||
+ //
|
||
+ // ``` go
|
||
+ // func fact(n int) int {
|
||
+ // if n <= 1 {
|
||
+ // return n
|
||
+ // }
|
||
+ // return n * fact(n-1)
|
||
+ // }
|
||
+ // ```
|
||
+ if p.extensions&FencedCode != 0 {
|
||
+ if i := p.fencedCodeBlock(data, true); i > 0 {
|
||
+ data = data[i:]
|
||
+ continue
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // horizontal rule:
|
||
+ //
|
||
+ // ------
|
||
+ // or
|
||
+ // ******
|
||
+ // or
|
||
+ // ______
|
||
+ if p.isHRule(data) {
|
||
+ p.addBlock(HorizontalRule, nil)
|
||
+ var i int
|
||
+ for i = 0; i < len(data) && data[i] != '\n'; i++ {
|
||
+ }
|
||
+ data = data[i:]
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // block quote:
|
||
+ //
|
||
+ // > A big quote I found somewhere
|
||
+ // > on the web
|
||
+ if p.quotePrefix(data) > 0 {
|
||
+ data = data[p.quote(data):]
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // table:
|
||
+ //
|
||
+ // Name | Age | Phone
|
||
+ // ------|-----|---------
|
||
+ // Bob | 31 | 555-1234
|
||
+ // Alice | 27 | 555-4321
|
||
+ if p.extensions&Tables != 0 {
|
||
+ if i := p.table(data); i > 0 {
|
||
+ data = data[i:]
|
||
+ continue
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // an itemized/unordered list:
|
||
+ //
|
||
+ // * Item 1
|
||
+ // * Item 2
|
||
+ //
|
||
+ // also works with + or -
|
||
+ if p.uliPrefix(data) > 0 {
|
||
+ data = data[p.list(data, 0):]
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // a numbered/ordered list:
|
||
+ //
|
||
+ // 1. Item 1
|
||
+ // 2. Item 2
|
||
+ if p.oliPrefix(data) > 0 {
|
||
+ data = data[p.list(data, ListTypeOrdered):]
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // definition lists:
|
||
+ //
|
||
+ // Term 1
|
||
+ // : Definition a
|
||
+ // : Definition b
|
||
+ //
|
||
+ // Term 2
|
||
+ // : Definition c
|
||
+ if p.extensions&DefinitionLists != 0 {
|
||
+ if p.dliPrefix(data) > 0 {
|
||
+ data = data[p.list(data, ListTypeDefinition):]
|
||
+ continue
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // anything else must look like a normal paragraph
|
||
+ // note: this finds underlined headings, too
|
||
+ data = data[p.paragraph(data):]
|
||
+ }
|
||
+
|
||
+ p.nesting--
|
||
+}
|
||
+
|
||
+func (p *Markdown) addBlock(typ NodeType, content []byte) *Node {
|
||
+ p.closeUnmatchedBlocks()
|
||
+ container := p.addChild(typ, 0)
|
||
+ container.content = content
|
||
+ return container
|
||
+}
|
||
+
|
||
+func (p *Markdown) isPrefixHeading(data []byte) bool {
|
||
+ if data[0] != '#' {
|
||
+ return false
|
||
+ }
|
||
+
|
||
+ if p.extensions&SpaceHeadings != 0 {
|
||
+ level := 0
|
||
+ for level < 6 && level < len(data) && data[level] == '#' {
|
||
+ level++
|
||
+ }
|
||
+ if level == len(data) || data[level] != ' ' {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+func (p *Markdown) prefixHeading(data []byte) int {
|
||
+ level := 0
|
||
+ for level < 6 && level < len(data) && data[level] == '#' {
|
||
+ level++
|
||
+ }
|
||
+ i := skipChar(data, level, ' ')
|
||
+ end := skipUntilChar(data, i, '\n')
|
||
+ skip := end
|
||
+ id := ""
|
||
+ if p.extensions&HeadingIDs != 0 {
|
||
+ j, k := 0, 0
|
||
+ // find start/end of heading id
|
||
+ for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
|
||
+ }
|
||
+ for k = j + 1; k < end && data[k] != '}'; k++ {
|
||
+ }
|
||
+ // extract heading id iff found
|
||
+ if j < end && k < end {
|
||
+ id = string(data[j+2 : k])
|
||
+ end = j
|
||
+ skip = k + 1
|
||
+ for end > 0 && data[end-1] == ' ' {
|
||
+ end--
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ for end > 0 && data[end-1] == '#' {
|
||
+ if isBackslashEscaped(data, end-1) {
|
||
+ break
|
||
+ }
|
||
+ end--
|
||
+ }
|
||
+ for end > 0 && data[end-1] == ' ' {
|
||
+ end--
|
||
+ }
|
||
+ if end > i {
|
||
+ if id == "" && p.extensions&AutoHeadingIDs != 0 {
|
||
+ id = SanitizedAnchorName(string(data[i:end]))
|
||
+ }
|
||
+ block := p.addBlock(Heading, data[i:end])
|
||
+ block.HeadingID = id
|
||
+ block.Level = level
|
||
+ }
|
||
+ return skip
|
||
+}
|
||
+
|
||
+func (p *Markdown) isUnderlinedHeading(data []byte) int {
|
||
+ // test of level 1 heading
|
||
+ if data[0] == '=' {
|
||
+ i := skipChar(data, 1, '=')
|
||
+ i = skipChar(data, i, ' ')
|
||
+ if i < len(data) && data[i] == '\n' {
|
||
+ return 1
|
||
+ }
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ // test of level 2 heading
|
||
+ if data[0] == '-' {
|
||
+ i := skipChar(data, 1, '-')
|
||
+ i = skipChar(data, i, ' ')
|
||
+ if i < len(data) && data[i] == '\n' {
|
||
+ return 2
|
||
+ }
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (p *Markdown) titleBlock(data []byte, doRender bool) int {
|
||
+ if data[0] != '%' {
|
||
+ return 0
|
||
+ }
|
||
+ splitData := bytes.Split(data, []byte("\n"))
|
||
+ var i int
|
||
+ for idx, b := range splitData {
|
||
+ if !bytes.HasPrefix(b, []byte("%")) {
|
||
+ i = idx // - 1
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+
|
||
+ data = bytes.Join(splitData[0:i], []byte("\n"))
|
||
+ consumed := len(data)
|
||
+ data = bytes.TrimPrefix(data, []byte("% "))
|
||
+ data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1)
|
||
+ block := p.addBlock(Heading, data)
|
||
+ block.Level = 1
|
||
+ block.IsTitleblock = true
|
||
+
|
||
+ return consumed
|
||
+}
|
||
+
|
||
+func (p *Markdown) html(data []byte, doRender bool) int {
|
||
+ var i, j int
|
||
+
|
||
+ // identify the opening tag
|
||
+ if data[0] != '<' {
|
||
+ return 0
|
||
+ }
|
||
+ curtag, tagfound := p.htmlFindTag(data[1:])
|
||
+
|
||
+ // handle special cases
|
||
+ if !tagfound {
|
||
+ // check for an HTML comment
|
||
+ if size := p.htmlComment(data, doRender); size > 0 {
|
||
+ return size
|
||
+ }
|
||
+
|
||
+ // check for an <hr> tag
|
||
+ if size := p.htmlHr(data, doRender); size > 0 {
|
||
+ return size
|
||
+ }
|
||
+
|
||
+ // no special case recognized
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ // look for an unindented matching closing tag
|
||
+ // followed by a blank line
|
||
+ found := false
|
||
+ /*
|
||
+ closetag := []byte("\n</" + curtag + ">")
|
||
+ j = len(curtag) + 1
|
||
+ for !found {
|
||
+ // scan for a closing tag at the beginning of a line
|
||
+ if skip := bytes.Index(data[j:], closetag); skip >= 0 {
|
||
+ j += skip + len(closetag)
|
||
+ } else {
|
||
+ break
|
||
+ }
|
||
+
|
||
+ // see if it is the only thing on the line
|
||
+ if skip := p.isEmpty(data[j:]); skip > 0 {
|
||
+ // see if it is followed by a blank line/eof
|
||
+ j += skip
|
||
+ if j >= len(data) {
|
||
+ found = true
|
||
+ i = j
|
||
+ } else {
|
||
+ if skip := p.isEmpty(data[j:]); skip > 0 {
|
||
+ j += skip
|
||
+ found = true
|
||
+ i = j
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+ */
|
||
+
|
||
+ // if not found, try a second pass looking for indented match
|
||
+ // but not if tag is "ins" or "del" (following original Markdown.pl)
|
||
+ if !found && curtag != "ins" && curtag != "del" {
|
||
+ i = 1
|
||
+ for i < len(data) {
|
||
+ i++
|
||
+ for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ if i+2+len(curtag) >= len(data) {
|
||
+ break
|
||
+ }
|
||
+
|
||
+ j = p.htmlFindEnd(curtag, data[i-1:])
|
||
+
|
||
+ if j > 0 {
|
||
+ i += j - 1
|
||
+ found = true
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if !found {
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ // the end of the block has been found
|
||
+ if doRender {
|
||
+ // trim newlines
|
||
+ end := i
|
||
+ for end > 0 && data[end-1] == '\n' {
|
||
+ end--
|
||
+ }
|
||
+ finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
|
||
+ }
|
||
+
|
||
+ return i
|
||
+}
|
||
+
|
||
+func finalizeHTMLBlock(block *Node) {
|
||
+ block.Literal = block.content
|
||
+ block.content = nil
|
||
+}
|
||
+
|
||
+// HTML comment, lax form
|
||
+func (p *Markdown) htmlComment(data []byte, doRender bool) int {
|
||
+ i := p.inlineHTMLComment(data)
|
||
+ // needs to end with a blank line
|
||
+ if j := p.isEmpty(data[i:]); j > 0 {
|
||
+ size := i + j
|
||
+ if doRender {
|
||
+ // trim trailing newlines
|
||
+ end := size
|
||
+ for end > 0 && data[end-1] == '\n' {
|
||
+ end--
|
||
+ }
|
||
+ block := p.addBlock(HTMLBlock, data[:end])
|
||
+ finalizeHTMLBlock(block)
|
||
+ }
|
||
+ return size
|
||
+ }
|
||
+ return 0
|
||
+}
|
||
+
|
||
+// HR, which is the only self-closing block tag considered
|
||
+func (p *Markdown) htmlHr(data []byte, doRender bool) int {
|
||
+ if len(data) < 4 {
|
||
+ return 0
|
||
+ }
|
||
+ if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
|
||
+ return 0
|
||
+ }
|
||
+ if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
|
||
+ // not an <hr> tag after all; at least not a valid one
|
||
+ return 0
|
||
+ }
|
||
+ i := 3
|
||
+ for i < len(data) && data[i] != '>' && data[i] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+ if i < len(data) && data[i] == '>' {
|
||
+ i++
|
||
+ if j := p.isEmpty(data[i:]); j > 0 {
|
||
+ size := i + j
|
||
+ if doRender {
|
||
+ // trim newlines
|
||
+ end := size
|
||
+ for end > 0 && data[end-1] == '\n' {
|
||
+ end--
|
||
+ }
|
||
+ finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
|
||
+ }
|
||
+ return size
|
||
+ }
|
||
+ }
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (p *Markdown) htmlFindTag(data []byte) (string, bool) {
|
||
+ i := 0
|
||
+ for i < len(data) && isalnum(data[i]) {
|
||
+ i++
|
||
+ }
|
||
+ key := string(data[:i])
|
||
+ if _, ok := blockTags[key]; ok {
|
||
+ return key, true
|
||
+ }
|
||
+ return "", false
|
||
+}
|
||
+
|
||
+func (p *Markdown) htmlFindEnd(tag string, data []byte) int {
|
||
+ // assume data[0] == '<' && data[1] == '/' already tested
|
||
+ if tag == "hr" {
|
||
+ return 2
|
||
+ }
|
||
+ // check if tag is a match
|
||
+ closetag := []byte("</" + tag + ">")
|
||
+ if !bytes.HasPrefix(data, closetag) {
|
||
+ return 0
|
||
+ }
|
||
+ i := len(closetag)
|
||
+
|
||
+ // check that the rest of the line is blank
|
||
+ skip := 0
|
||
+ if skip = p.isEmpty(data[i:]); skip == 0 {
|
||
+ return 0
|
||
+ }
|
||
+ i += skip
|
||
+ skip = 0
|
||
+
|
||
+ if i >= len(data) {
|
||
+ return i
|
||
+ }
|
||
+
|
||
+ if p.extensions&LaxHTMLBlocks != 0 {
|
||
+ return i
|
||
+ }
|
||
+ if skip = p.isEmpty(data[i:]); skip == 0 {
|
||
+ // following line must be blank
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ return i + skip
|
||
+}
|
||
+
|
||
+func (*Markdown) isEmpty(data []byte) int {
|
||
+ // it is okay to call isEmpty on an empty buffer
|
||
+ if len(data) == 0 {
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ var i int
|
||
+ for i = 0; i < len(data) && data[i] != '\n'; i++ {
|
||
+ if data[i] != ' ' && data[i] != '\t' {
|
||
+ return 0
|
||
+ }
|
||
+ }
|
||
+ if i < len(data) && data[i] == '\n' {
|
||
+ i++
|
||
+ }
|
||
+ return i
|
||
+}
|
||
+
|
||
+func (*Markdown) isHRule(data []byte) bool {
|
||
+ i := 0
|
||
+
|
||
+ // skip up to three spaces
|
||
+ for i < 3 && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // look at the hrule char
|
||
+ if data[i] != '*' && data[i] != '-' && data[i] != '_' {
|
||
+ return false
|
||
+ }
|
||
+ c := data[i]
|
||
+
|
||
+ // the whole line must be the char or whitespace
|
||
+ n := 0
|
||
+ for i < len(data) && data[i] != '\n' {
|
||
+ switch {
|
||
+ case data[i] == c:
|
||
+ n++
|
||
+ case data[i] != ' ':
|
||
+ return false
|
||
+ }
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ return n >= 3
|
||
+}
|
||
+
|
||
+// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
|
||
+// and returns the end index if so, or 0 otherwise. It also returns the marker found.
|
||
+// If info is not nil, it gets set to the syntax specified in the fence line.
|
||
+func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) {
|
||
+ i, size := 0, 0
|
||
+
|
||
+ // skip up to three spaces
|
||
+ for i < len(data) && i < 3 && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // check for the marker characters: ~ or `
|
||
+ if i >= len(data) {
|
||
+ return 0, ""
|
||
+ }
|
||
+ if data[i] != '~' && data[i] != '`' {
|
||
+ return 0, ""
|
||
+ }
|
||
+
|
||
+ c := data[i]
|
||
+
|
||
+ // the whole line must be the same char or whitespace
|
||
+ for i < len(data) && data[i] == c {
|
||
+ size++
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // the marker char must occur at least 3 times
|
||
+ if size < 3 {
|
||
+ return 0, ""
|
||
+ }
|
||
+ marker = string(data[i-size : i])
|
||
+
|
||
+ // if this is the end marker, it must match the beginning marker
|
||
+ if oldmarker != "" && marker != oldmarker {
|
||
+ return 0, ""
|
||
+ }
|
||
+
|
||
+ // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
|
||
+ // into one, always get the info string, and discard it if the caller doesn't care.
|
||
+ if info != nil {
|
||
+ infoLength := 0
|
||
+ i = skipChar(data, i, ' ')
|
||
+
|
||
+ if i >= len(data) {
|
||
+ if i == len(data) {
|
||
+ return i, marker
|
||
+ }
|
||
+ return 0, ""
|
||
+ }
|
||
+
|
||
+ infoStart := i
|
||
+
|
||
+ if data[i] == '{' {
|
||
+ i++
|
||
+ infoStart++
|
||
+
|
||
+ for i < len(data) && data[i] != '}' && data[i] != '\n' {
|
||
+ infoLength++
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ if i >= len(data) || data[i] != '}' {
|
||
+ return 0, ""
|
||
+ }
|
||
+
|
||
+ // strip all whitespace at the beginning and the end
|
||
+ // of the {} block
|
||
+ for infoLength > 0 && isspace(data[infoStart]) {
|
||
+ infoStart++
|
||
+ infoLength--
|
||
+ }
|
||
+
|
||
+ for infoLength > 0 && isspace(data[infoStart+infoLength-1]) {
|
||
+ infoLength--
|
||
+ }
|
||
+ i++
|
||
+ i = skipChar(data, i, ' ')
|
||
+ } else {
|
||
+ for i < len(data) && !isverticalspace(data[i]) {
|
||
+ infoLength++
|
||
+ i++
|
||
+ }
|
||
+ }
|
||
+
|
||
+ *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength]))
|
||
+ }
|
||
+
|
||
+ if i == len(data) {
|
||
+ return i, marker
|
||
+ }
|
||
+ if i > len(data) || data[i] != '\n' {
|
||
+ return 0, ""
|
||
+ }
|
||
+ return i + 1, marker // Take newline into account.
|
||
+}
|
||
+
|
||
+// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
|
||
+// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
|
||
+// If doRender is true, a final newline is mandatory to recognize the fenced code block.
|
||
+func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int {
|
||
+ var info string
|
||
+ beg, marker := isFenceLine(data, &info, "")
|
||
+ if beg == 0 || beg >= len(data) {
|
||
+ return 0
|
||
+ }
|
||
+ fenceLength := beg - 1
|
||
+
|
||
+ var work bytes.Buffer
|
||
+ work.Write([]byte(info))
|
||
+ work.WriteByte('\n')
|
||
+
|
||
+ for {
|
||
+ // safe to assume beg < len(data)
|
||
+
|
||
+ // check for the end of the code block
|
||
+ fenceEnd, _ := isFenceLine(data[beg:], nil, marker)
|
||
+ if fenceEnd != 0 {
|
||
+ beg += fenceEnd
|
||
+ break
|
||
+ }
|
||
+
|
||
+ // copy the current line
|
||
+ end := skipUntilChar(data, beg, '\n') + 1
|
||
+
|
||
+ // did we reach the end of the buffer without a closing marker?
|
||
+ if end >= len(data) {
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ // verbatim copy to the working buffer
|
||
+ if doRender {
|
||
+ work.Write(data[beg:end])
|
||
+ }
|
||
+ beg = end
|
||
+ }
|
||
+
|
||
+ if doRender {
|
||
+ block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer
|
||
+ block.IsFenced = true
|
||
+ block.FenceLength = fenceLength
|
||
+ finalizeCodeBlock(block)
|
||
+ }
|
||
+
|
||
+ return beg
|
||
+}
|
||
+
|
||
+func unescapeChar(str []byte) []byte {
|
||
+ if str[0] == '\\' {
|
||
+ return []byte{str[1]}
|
||
+ }
|
||
+ return []byte(html.UnescapeString(string(str)))
|
||
+}
|
||
+
|
||
+func unescapeString(str []byte) []byte {
|
||
+ if reBackslashOrAmp.Match(str) {
|
||
+ return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar)
|
||
+ }
|
||
+ return str
|
||
+}
|
||
+
|
||
+func finalizeCodeBlock(block *Node) {
|
||
+ if block.IsFenced {
|
||
+ newlinePos := bytes.IndexByte(block.content, '\n')
|
||
+ firstLine := block.content[:newlinePos]
|
||
+ rest := block.content[newlinePos+1:]
|
||
+ block.Info = unescapeString(bytes.Trim(firstLine, "\n"))
|
||
+ block.Literal = rest
|
||
+ } else {
|
||
+ block.Literal = block.content
|
||
+ }
|
||
+ block.content = nil
|
||
+}
|
||
+
|
||
+func (p *Markdown) table(data []byte) int {
|
||
+ table := p.addBlock(Table, nil)
|
||
+ i, columns := p.tableHeader(data)
|
||
+ if i == 0 {
|
||
+ p.tip = table.Parent
|
||
+ table.Unlink()
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ p.addBlock(TableBody, nil)
|
||
+
|
||
+ for i < len(data) {
|
||
+ pipes, rowStart := 0, i
|
||
+ for ; i < len(data) && data[i] != '\n'; i++ {
|
||
+ if data[i] == '|' {
|
||
+ pipes++
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if pipes == 0 {
|
||
+ i = rowStart
|
||
+ break
|
||
+ }
|
||
+
|
||
+ // include the newline in data sent to tableRow
|
||
+ if i < len(data) && data[i] == '\n' {
|
||
+ i++
|
||
+ }
|
||
+ p.tableRow(data[rowStart:i], columns, false)
|
||
+ }
|
||
+
|
||
+ return i
|
||
+}
|
||
+
|
||
+// check if the specified position is preceded by an odd number of backslashes
|
||
+func isBackslashEscaped(data []byte, i int) bool {
|
||
+ backslashes := 0
|
||
+ for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
|
||
+ backslashes++
|
||
+ }
|
||
+ return backslashes&1 == 1
|
||
+}
|
||
+
|
||
+func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) {
|
||
+ i := 0
|
||
+ colCount := 1
|
||
+ for i = 0; i < len(data) && data[i] != '\n'; i++ {
|
||
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
|
||
+ colCount++
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // doesn't look like a table header
|
||
+ if colCount == 1 {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ // include the newline in the data sent to tableRow
|
||
+ j := i
|
||
+ if j < len(data) && data[j] == '\n' {
|
||
+ j++
|
||
+ }
|
||
+ header := data[:j]
|
||
+
|
||
+ // column count ignores pipes at beginning or end of line
|
||
+ if data[0] == '|' {
|
||
+ colCount--
|
||
+ }
|
||
+ if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
|
||
+ colCount--
|
||
+ }
|
||
+
|
||
+ columns = make([]CellAlignFlags, colCount)
|
||
+
|
||
+ // move on to the header underline
|
||
+ i++
|
||
+ if i >= len(data) {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
|
||
+ i++
|
||
+ }
|
||
+ i = skipChar(data, i, ' ')
|
||
+
|
||
+ // each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
|
||
+ // and trailing | optional on last column
|
||
+ col := 0
|
||
+ for i < len(data) && data[i] != '\n' {
|
||
+ dashes := 0
|
||
+
|
||
+ if data[i] == ':' {
|
||
+ i++
|
||
+ columns[col] |= TableAlignmentLeft
|
||
+ dashes++
|
||
+ }
|
||
+ for i < len(data) && data[i] == '-' {
|
||
+ i++
|
||
+ dashes++
|
||
+ }
|
||
+ if i < len(data) && data[i] == ':' {
|
||
+ i++
|
||
+ columns[col] |= TableAlignmentRight
|
||
+ dashes++
|
||
+ }
|
||
+ for i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+ if i == len(data) {
|
||
+ return
|
||
+ }
|
||
+ // end of column test is messy
|
||
+ switch {
|
||
+ case dashes < 3:
|
||
+ // not a valid column
|
||
+ return
|
||
+
|
||
+ case data[i] == '|' && !isBackslashEscaped(data, i):
|
||
+ // marker found, now skip past trailing whitespace
|
||
+ col++
|
||
+ i++
|
||
+ for i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // trailing junk found after last column
|
||
+ if col >= colCount && i < len(data) && data[i] != '\n' {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
|
||
+ // something else found where marker was required
|
||
+ return
|
||
+
|
||
+ case data[i] == '\n':
|
||
+ // marker is optional for the last column
|
||
+ col++
|
||
+
|
||
+ default:
|
||
+ // trailing junk found after last column
|
||
+ return
|
||
+ }
|
||
+ }
|
||
+ if col != colCount {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ p.addBlock(TableHead, nil)
|
||
+ p.tableRow(header, columns, true)
|
||
+ size = i
|
||
+ if size < len(data) && data[size] == '\n' {
|
||
+ size++
|
||
+ }
|
||
+ return
|
||
+}
|
||
+
|
||
+func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) {
|
||
+ p.addBlock(TableRow, nil)
|
||
+ i, col := 0, 0
|
||
+
|
||
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ for col = 0; col < len(columns) && i < len(data); col++ {
|
||
+ for i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ cellStart := i
|
||
+
|
||
+ for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ cellEnd := i
|
||
+
|
||
+ // skip the end-of-cell marker, possibly taking us past end of buffer
|
||
+ i++
|
||
+
|
||
+ for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' {
|
||
+ cellEnd--
|
||
+ }
|
||
+
|
||
+ cell := p.addBlock(TableCell, data[cellStart:cellEnd])
|
||
+ cell.IsHeader = header
|
||
+ cell.Align = columns[col]
|
||
+ }
|
||
+
|
||
+ // pad it out with empty columns to get the right number
|
||
+ for ; col < len(columns); col++ {
|
||
+ cell := p.addBlock(TableCell, nil)
|
||
+ cell.IsHeader = header
|
||
+ cell.Align = columns[col]
|
||
+ }
|
||
+
|
||
+ // silently ignore rows with too many cells
|
||
+}
|
||
+
|
||
+// returns blockquote prefix length
|
||
+func (p *Markdown) quotePrefix(data []byte) int {
|
||
+ i := 0
|
||
+ for i < 3 && i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+ if i < len(data) && data[i] == '>' {
|
||
+ if i+1 < len(data) && data[i+1] == ' ' {
|
||
+ return i + 2
|
||
+ }
|
||
+ return i + 1
|
||
+ }
|
||
+ return 0
|
||
+}
|
||
+
|
||
+// blockquote ends with at least one blank line
|
||
+// followed by something without a blockquote prefix
|
||
+func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool {
|
||
+ if p.isEmpty(data[beg:]) <= 0 {
|
||
+ return false
|
||
+ }
|
||
+ if end >= len(data) {
|
||
+ return true
|
||
+ }
|
||
+ return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
|
||
+}
|
||
+
|
||
+// parse a blockquote fragment
|
||
+func (p *Markdown) quote(data []byte) int {
|
||
+ block := p.addBlock(BlockQuote, nil)
|
||
+ var raw bytes.Buffer
|
||
+ beg, end := 0, 0
|
||
+ for beg < len(data) {
|
||
+ end = beg
|
||
+ // Step over whole lines, collecting them. While doing that, check for
|
||
+ // fenced code and if one's found, incorporate it altogether,
|
||
+ // irregardless of any contents inside it
|
||
+ for end < len(data) && data[end] != '\n' {
|
||
+ if p.extensions&FencedCode != 0 {
|
||
+ if i := p.fencedCodeBlock(data[end:], false); i > 0 {
|
||
+ // -1 to compensate for the extra end++ after the loop:
|
||
+ end += i - 1
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+ end++
|
||
+ }
|
||
+ if end < len(data) && data[end] == '\n' {
|
||
+ end++
|
||
+ }
|
||
+ if pre := p.quotePrefix(data[beg:]); pre > 0 {
|
||
+ // skip the prefix
|
||
+ beg += pre
|
||
+ } else if p.terminateBlockquote(data, beg, end) {
|
||
+ break
|
||
+ }
|
||
+ // this line is part of the blockquote
|
||
+ raw.Write(data[beg:end])
|
||
+ beg = end
|
||
+ }
|
||
+ p.block(raw.Bytes())
|
||
+ p.finalize(block)
|
||
+ return end
|
||
+}
|
||
+
|
||
+// returns prefix length for block code
|
||
+func (p *Markdown) codePrefix(data []byte) int {
|
||
+ if len(data) >= 1 && data[0] == '\t' {
|
||
+ return 1
|
||
+ }
|
||
+ if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
|
||
+ return 4
|
||
+ }
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (p *Markdown) code(data []byte) int {
|
||
+ var work bytes.Buffer
|
||
+
|
||
+ i := 0
|
||
+ for i < len(data) {
|
||
+ beg := i
|
||
+ for i < len(data) && data[i] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+ if i < len(data) && data[i] == '\n' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ blankline := p.isEmpty(data[beg:i]) > 0
|
||
+ if pre := p.codePrefix(data[beg:i]); pre > 0 {
|
||
+ beg += pre
|
||
+ } else if !blankline {
|
||
+ // non-empty, non-prefixed line breaks the pre
|
||
+ i = beg
|
||
+ break
|
||
+ }
|
||
+
|
||
+ // verbatim copy to the working buffer
|
||
+ if blankline {
|
||
+ work.WriteByte('\n')
|
||
+ } else {
|
||
+ work.Write(data[beg:i])
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // trim all the \n off the end of work
|
||
+ workbytes := work.Bytes()
|
||
+ eol := len(workbytes)
|
||
+ for eol > 0 && workbytes[eol-1] == '\n' {
|
||
+ eol--
|
||
+ }
|
||
+ if eol != len(workbytes) {
|
||
+ work.Truncate(eol)
|
||
+ }
|
||
+
|
||
+ work.WriteByte('\n')
|
||
+
|
||
+ block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer
|
||
+ block.IsFenced = false
|
||
+ finalizeCodeBlock(block)
|
||
+
|
||
+ return i
|
||
+}
|
||
+
|
||
+// returns unordered list item prefix
|
||
+func (p *Markdown) uliPrefix(data []byte) int {
|
||
+ i := 0
|
||
+ // start with up to 3 spaces
|
||
+ for i < len(data) && i < 3 && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+ if i >= len(data)-1 {
|
||
+ return 0
|
||
+ }
|
||
+ // need one of {'*', '+', '-'} followed by a space or a tab
|
||
+ if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
|
||
+ (data[i+1] != ' ' && data[i+1] != '\t') {
|
||
+ return 0
|
||
+ }
|
||
+ return i + 2
|
||
+}
|
||
+
|
||
+// returns ordered list item prefix
|
||
+func (p *Markdown) oliPrefix(data []byte) int {
|
||
+ i := 0
|
||
+
|
||
+ // start with up to 3 spaces
|
||
+ for i < 3 && i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // count the digits
|
||
+ start := i
|
||
+ for i < len(data) && data[i] >= '0' && data[i] <= '9' {
|
||
+ i++
|
||
+ }
|
||
+ if start == i || i >= len(data)-1 {
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ // we need >= 1 digits followed by a dot and a space or a tab
|
||
+ if data[i] != '.' || !(data[i+1] == ' ' || data[i+1] == '\t') {
|
||
+ return 0
|
||
+ }
|
||
+ return i + 2
|
||
+}
|
||
+
|
||
+// returns definition list item prefix
|
||
+func (p *Markdown) dliPrefix(data []byte) int {
|
||
+ if len(data) < 2 {
|
||
+ return 0
|
||
+ }
|
||
+ i := 0
|
||
+ // need a ':' followed by a space or a tab
|
||
+ if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') {
|
||
+ return 0
|
||
+ }
|
||
+ for i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+ return i + 2
|
||
+}
|
||
+
|
||
+// parse ordered or unordered list block
|
||
+func (p *Markdown) list(data []byte, flags ListType) int {
|
||
+ i := 0
|
||
+ flags |= ListItemBeginningOfList
|
||
+ block := p.addBlock(List, nil)
|
||
+ block.ListFlags = flags
|
||
+ block.Tight = true
|
||
+
|
||
+ for i < len(data) {
|
||
+ skip := p.listItem(data[i:], &flags)
|
||
+ if flags&ListItemContainsBlock != 0 {
|
||
+ block.ListData.Tight = false
|
||
+ }
|
||
+ i += skip
|
||
+ if skip == 0 || flags&ListItemEndOfList != 0 {
|
||
+ break
|
||
+ }
|
||
+ flags &= ^ListItemBeginningOfList
|
||
+ }
|
||
+
|
||
+ above := block.Parent
|
||
+ finalizeList(block)
|
||
+ p.tip = above
|
||
+ return i
|
||
+}
|
||
+
|
||
+// Returns true if the list item is not the same type as its parent list
|
||
+func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool {
|
||
+ if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 {
|
||
+ return true
|
||
+ } else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 {
|
||
+ return true
|
||
+ } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) {
|
||
+ return true
|
||
+ }
|
||
+ return false
|
||
+}
|
||
+
|
||
+// Returns true if block ends with a blank line, descending if needed
|
||
+// into lists and sublists.
|
||
+func endsWithBlankLine(block *Node) bool {
|
||
+ // TODO: figure this out. Always false now.
|
||
+ for block != nil {
|
||
+ //if block.lastLineBlank {
|
||
+ //return true
|
||
+ //}
|
||
+ t := block.Type
|
||
+ if t == List || t == Item {
|
||
+ block = block.LastChild
|
||
+ } else {
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+ return false
|
||
+}
|
||
+
|
||
+func finalizeList(block *Node) {
|
||
+ block.open = false
|
||
+ item := block.FirstChild
|
||
+ for item != nil {
|
||
+ // check for non-final list item ending with blank line:
|
||
+ if endsWithBlankLine(item) && item.Next != nil {
|
||
+ block.ListData.Tight = false
|
||
+ break
|
||
+ }
|
||
+ // recurse into children of list item, to see if there are spaces
|
||
+ // between any of them:
|
||
+ subItem := item.FirstChild
|
||
+ for subItem != nil {
|
||
+ if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) {
|
||
+ block.ListData.Tight = false
|
||
+ break
|
||
+ }
|
||
+ subItem = subItem.Next
|
||
+ }
|
||
+ item = item.Next
|
||
+ }
|
||
+}
|
||
+
|
||
+// Parse a single list item.
|
||
+// Assumes initial prefix is already removed if this is a sublist.
|
||
+func (p *Markdown) listItem(data []byte, flags *ListType) int {
|
||
+ // keep track of the indentation of the first line
|
||
+ itemIndent := 0
|
||
+ if data[0] == '\t' {
|
||
+ itemIndent += 4
|
||
+ } else {
|
||
+ for itemIndent < 3 && data[itemIndent] == ' ' {
|
||
+ itemIndent++
|
||
+ }
|
||
+ }
|
||
+
|
||
+ var bulletChar byte = '*'
|
||
+ i := p.uliPrefix(data)
|
||
+ if i == 0 {
|
||
+ i = p.oliPrefix(data)
|
||
+ } else {
|
||
+ bulletChar = data[i-2]
|
||
+ }
|
||
+ if i == 0 {
|
||
+ i = p.dliPrefix(data)
|
||
+ // reset definition term flag
|
||
+ if i > 0 {
|
||
+ *flags &= ^ListTypeTerm
|
||
+ }
|
||
+ }
|
||
+ if i == 0 {
|
||
+ // if in definition list, set term flag and continue
|
||
+ if *flags&ListTypeDefinition != 0 {
|
||
+ *flags |= ListTypeTerm
|
||
+ } else {
|
||
+ return 0
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // skip leading whitespace on first line
|
||
+ for i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // find the end of the line
|
||
+ line := i
|
||
+ for i > 0 && i < len(data) && data[i-1] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // get working buffer
|
||
+ var raw bytes.Buffer
|
||
+
|
||
+ // put the first line into the working buffer
|
||
+ raw.Write(data[line:i])
|
||
+ line = i
|
||
+
|
||
+ // process the following lines
|
||
+ containsBlankLine := false
|
||
+ sublist := 0
|
||
+ codeBlockMarker := ""
|
||
+
|
||
+gatherlines:
|
||
+ for line < len(data) {
|
||
+ i++
|
||
+
|
||
+ // find the end of this line
|
||
+ for i < len(data) && data[i-1] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // if it is an empty line, guess that it is part of this item
|
||
+ // and move on to the next line
|
||
+ if p.isEmpty(data[line:i]) > 0 {
|
||
+ containsBlankLine = true
|
||
+ line = i
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ // calculate the indentation
|
||
+ indent := 0
|
||
+ indentIndex := 0
|
||
+ if data[line] == '\t' {
|
||
+ indentIndex++
|
||
+ indent += 4
|
||
+ } else {
|
||
+ for indent < 4 && line+indent < i && data[line+indent] == ' ' {
|
||
+ indent++
|
||
+ indentIndex++
|
||
+ }
|
||
+ }
|
||
+
|
||
+ chunk := data[line+indentIndex : i]
|
||
+
|
||
+ if p.extensions&FencedCode != 0 {
|
||
+ // determine if in or out of codeblock
|
||
+ // if in codeblock, ignore normal list processing
|
||
+ _, marker := isFenceLine(chunk, nil, codeBlockMarker)
|
||
+ if marker != "" {
|
||
+ if codeBlockMarker == "" {
|
||
+ // start of codeblock
|
||
+ codeBlockMarker = marker
|
||
+ } else {
|
||
+ // end of codeblock.
|
||
+ codeBlockMarker = ""
|
||
+ }
|
||
+ }
|
||
+ // we are in a codeblock, write line, and continue
|
||
+ if codeBlockMarker != "" || marker != "" {
|
||
+ raw.Write(data[line+indentIndex : i])
|
||
+ line = i
|
||
+ continue gatherlines
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // evaluate how this line fits in
|
||
+ switch {
|
||
+ // is this a nested list item?
|
||
+ case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
|
||
+ p.oliPrefix(chunk) > 0 ||
|
||
+ p.dliPrefix(chunk) > 0:
|
||
+
|
||
+ // to be a nested list, it must be indented more
|
||
+ // if not, it is either a different kind of list
|
||
+ // or the next item in the same list
|
||
+ if indent <= itemIndent {
|
||
+ if p.listTypeChanged(chunk, flags) {
|
||
+ *flags |= ListItemEndOfList
|
||
+ } else if containsBlankLine {
|
||
+ *flags |= ListItemContainsBlock
|
||
+ }
|
||
+
|
||
+ break gatherlines
|
||
+ }
|
||
+
|
||
+ if containsBlankLine {
|
||
+ *flags |= ListItemContainsBlock
|
||
+ }
|
||
+
|
||
+ // is this the first item in the nested list?
|
||
+ if sublist == 0 {
|
||
+ sublist = raw.Len()
|
||
+ }
|
||
+
|
||
+ // is this a nested prefix heading?
|
||
+ case p.isPrefixHeading(chunk):
|
||
+ // if the heading is not indented, it is not nested in the list
|
||
+ // and thus ends the list
|
||
+ if containsBlankLine && indent < 4 {
|
||
+ *flags |= ListItemEndOfList
|
||
+ break gatherlines
|
||
+ }
|
||
+ *flags |= ListItemContainsBlock
|
||
+
|
||
+ // anything following an empty line is only part
|
||
+ // of this item if it is indented 4 spaces
|
||
+ // (regardless of the indentation of the beginning of the item)
|
||
+ case containsBlankLine && indent < 4:
|
||
+ if *flags&ListTypeDefinition != 0 && i < len(data)-1 {
|
||
+ // is the next item still a part of this list?
|
||
+ next := i
|
||
+ for next < len(data) && data[next] != '\n' {
|
||
+ next++
|
||
+ }
|
||
+ for next < len(data)-1 && data[next] == '\n' {
|
||
+ next++
|
||
+ }
|
||
+ if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
|
||
+ *flags |= ListItemEndOfList
|
||
+ }
|
||
+ } else {
|
||
+ *flags |= ListItemEndOfList
|
||
+ }
|
||
+ break gatherlines
|
||
+
|
||
+ // a blank line means this should be parsed as a block
|
||
+ case containsBlankLine:
|
||
+ raw.WriteByte('\n')
|
||
+ *flags |= ListItemContainsBlock
|
||
+ }
|
||
+
|
||
+ // if this line was preceded by one or more blanks,
|
||
+ // re-introduce the blank into the buffer
|
||
+ if containsBlankLine {
|
||
+ containsBlankLine = false
|
||
+ raw.WriteByte('\n')
|
||
+ }
|
||
+
|
||
+ // add the line into the working buffer without prefix
|
||
+ raw.Write(data[line+indentIndex : i])
|
||
+
|
||
+ line = i
|
||
+ }
|
||
+
|
||
+ rawBytes := raw.Bytes()
|
||
+
|
||
+ block := p.addBlock(Item, nil)
|
||
+ block.ListFlags = *flags
|
||
+ block.Tight = false
|
||
+ block.BulletChar = bulletChar
|
||
+ block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark
|
||
+
|
||
+ // render the contents of the list item
|
||
+ if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 {
|
||
+ // intermediate render of block item, except for definition term
|
||
+ if sublist > 0 {
|
||
+ p.block(rawBytes[:sublist])
|
||
+ p.block(rawBytes[sublist:])
|
||
+ } else {
|
||
+ p.block(rawBytes)
|
||
+ }
|
||
+ } else {
|
||
+ // intermediate render of inline item
|
||
+ if sublist > 0 {
|
||
+ child := p.addChild(Paragraph, 0)
|
||
+ child.content = rawBytes[:sublist]
|
||
+ p.block(rawBytes[sublist:])
|
||
+ } else {
|
||
+ child := p.addChild(Paragraph, 0)
|
||
+ child.content = rawBytes
|
||
+ }
|
||
+ }
|
||
+ return line
|
||
+}
|
||
+
|
||
+// render a single paragraph that has already been parsed out
|
||
+func (p *Markdown) renderParagraph(data []byte) {
|
||
+ if len(data) == 0 {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ // trim leading spaces
|
||
+ beg := 0
|
||
+ for data[beg] == ' ' {
|
||
+ beg++
|
||
+ }
|
||
+
|
||
+ end := len(data)
|
||
+ // trim trailing newline
|
||
+ if data[len(data)-1] == '\n' {
|
||
+ end--
|
||
+ }
|
||
+
|
||
+ // trim trailing spaces
|
||
+ for end > beg && data[end-1] == ' ' {
|
||
+ end--
|
||
+ }
|
||
+
|
||
+ p.addBlock(Paragraph, data[beg:end])
|
||
+}
|
||
+
|
||
+func (p *Markdown) paragraph(data []byte) int {
|
||
+ // prev: index of 1st char of previous line
|
||
+ // line: index of 1st char of current line
|
||
+ // i: index of cursor/end of current line
|
||
+ var prev, line, i int
|
||
+ tabSize := TabSizeDefault
|
||
+ if p.extensions&TabSizeEight != 0 {
|
||
+ tabSize = TabSizeDouble
|
||
+ }
|
||
+ // keep going until we find something to mark the end of the paragraph
|
||
+ for i < len(data) {
|
||
+ // mark the beginning of the current line
|
||
+ prev = line
|
||
+ current := data[i:]
|
||
+ line = i
|
||
+
|
||
+ // did we find a reference or a footnote? If so, end a paragraph
|
||
+ // preceding it and report that we have consumed up to the end of that
|
||
+ // reference:
|
||
+ if refEnd := isReference(p, current, tabSize); refEnd > 0 {
|
||
+ p.renderParagraph(data[:i])
|
||
+ return i + refEnd
|
||
+ }
|
||
+
|
||
+ // did we find a blank line marking the end of the paragraph?
|
||
+ if n := p.isEmpty(current); n > 0 {
|
||
+ // did this blank line followed by a definition list item?
|
||
+ if p.extensions&DefinitionLists != 0 {
|
||
+ if i < len(data)-1 && data[i+1] == ':' {
|
||
+ return p.list(data[prev:], ListTypeDefinition)
|
||
+ }
|
||
+ }
|
||
+
|
||
+ p.renderParagraph(data[:i])
|
||
+ return i + n
|
||
+ }
|
||
+
|
||
+ // an underline under some text marks a heading, so our paragraph ended on prev line
|
||
+ if i > 0 {
|
||
+ if level := p.isUnderlinedHeading(current); level > 0 {
|
||
+ // render the paragraph
|
||
+ p.renderParagraph(data[:prev])
|
||
+
|
||
+ // ignore leading and trailing whitespace
|
||
+ eol := i - 1
|
||
+ for prev < eol && data[prev] == ' ' {
|
||
+ prev++
|
||
+ }
|
||
+ for eol > prev && data[eol-1] == ' ' {
|
||
+ eol--
|
||
+ }
|
||
+
|
||
+ id := ""
|
||
+ if p.extensions&AutoHeadingIDs != 0 {
|
||
+ id = SanitizedAnchorName(string(data[prev:eol]))
|
||
+ }
|
||
+
|
||
+ block := p.addBlock(Heading, data[prev:eol])
|
||
+ block.Level = level
|
||
+ block.HeadingID = id
|
||
+
|
||
+ // find the end of the underline
|
||
+ for i < len(data) && data[i] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+ return i
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // if the next line starts a block of HTML, then the paragraph ends here
|
||
+ if p.extensions&LaxHTMLBlocks != 0 {
|
||
+ if data[i] == '<' && p.html(current, false) > 0 {
|
||
+ // rewind to before the HTML block
|
||
+ p.renderParagraph(data[:i])
|
||
+ return i
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // if there's a prefixed heading or a horizontal rule after this, paragraph is over
|
||
+ if p.isPrefixHeading(current) || p.isHRule(current) {
|
||
+ p.renderParagraph(data[:i])
|
||
+ return i
|
||
+ }
|
||
+
|
||
+ // if there's a fenced code block, paragraph is over
|
||
+ if p.extensions&FencedCode != 0 {
|
||
+ if p.fencedCodeBlock(current, false) > 0 {
|
||
+ p.renderParagraph(data[:i])
|
||
+ return i
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // if there's a definition list item, prev line is a definition term
|
||
+ if p.extensions&DefinitionLists != 0 {
|
||
+ if p.dliPrefix(current) != 0 {
|
||
+ ret := p.list(data[prev:], ListTypeDefinition)
|
||
+ return ret
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // if there's a list after this, paragraph is over
|
||
+ if p.extensions&NoEmptyLineBeforeBlock != 0 {
|
||
+ if p.uliPrefix(current) != 0 ||
|
||
+ p.oliPrefix(current) != 0 ||
|
||
+ p.quotePrefix(current) != 0 ||
|
||
+ p.codePrefix(current) != 0 {
|
||
+ p.renderParagraph(data[:i])
|
||
+ return i
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // otherwise, scan to the beginning of the next line
|
||
+ nl := bytes.IndexByte(data[i:], '\n')
|
||
+ if nl >= 0 {
|
||
+ i += nl + 1
|
||
+ } else {
|
||
+ i += len(data[i:])
|
||
+ }
|
||
+ }
|
||
+
|
||
+ p.renderParagraph(data[:i])
|
||
+ return i
|
||
+}
|
||
+
|
||
+func skipChar(data []byte, start int, char byte) int {
|
||
+ i := start
|
||
+ for i < len(data) && data[i] == char {
|
||
+ i++
|
||
+ }
|
||
+ return i
|
||
+}
|
||
+
|
||
+func skipUntilChar(text []byte, start int, char byte) int {
|
||
+ i := start
|
||
+ for i < len(text) && text[i] != char {
|
||
+ i++
|
||
+ }
|
||
+ return i
|
||
+}
|
||
+
|
||
+// SanitizedAnchorName returns a sanitized anchor name for the given text.
|
||
+//
|
||
+// It implements the algorithm specified in the package comment.
|
||
+func SanitizedAnchorName(text string) string {
|
||
+ var anchorName []rune
|
||
+ futureDash := false
|
||
+ for _, r := range text {
|
||
+ switch {
|
||
+ case unicode.IsLetter(r) || unicode.IsNumber(r):
|
||
+ if futureDash && len(anchorName) > 0 {
|
||
+ anchorName = append(anchorName, '-')
|
||
+ }
|
||
+ futureDash = false
|
||
+ anchorName = append(anchorName, unicode.ToLower(r))
|
||
+ default:
|
||
+ futureDash = true
|
||
+ }
|
||
+ }
|
||
+ return string(anchorName)
|
||
+}
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go
|
||
new file mode 100644
|
||
index 000000000000..57ff152a0568
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/russross/blackfriday/v2/doc.go
|
||
@@ -0,0 +1,46 @@
|
||
+// Package blackfriday is a markdown processor.
|
||
+//
|
||
+// It translates plain text with simple formatting rules into an AST, which can
|
||
+// then be further processed to HTML (provided by Blackfriday itself) or other
|
||
+// formats (provided by the community).
|
||
+//
|
||
+// The simplest way to invoke Blackfriday is to call the Run function. It will
|
||
+// take a text input and produce a text output in HTML (or other format).
|
||
+//
|
||
+// A slightly more sophisticated way to use Blackfriday is to create a Markdown
|
||
+// processor and to call Parse, which returns a syntax tree for the input
|
||
+// document. You can leverage Blackfriday's parsing for content extraction from
|
||
+// markdown documents. You can assign a custom renderer and set various options
|
||
+// to the Markdown processor.
|
||
+//
|
||
+// If you're interested in calling Blackfriday from command line, see
|
||
+// https://github.com/russross/blackfriday-tool.
|
||
+//
|
||
+// Sanitized Anchor Names
|
||
+//
|
||
+// Blackfriday includes an algorithm for creating sanitized anchor names
|
||
+// corresponding to a given input text. This algorithm is used to create
|
||
+// anchors for headings when AutoHeadingIDs extension is enabled. The
|
||
+// algorithm is specified below, so that other packages can create
|
||
+// compatible anchor names and links to those anchors.
|
||
+//
|
||
+// The algorithm iterates over the input text, interpreted as UTF-8,
|
||
+// one Unicode code point (rune) at a time. All runes that are letters (category L)
|
||
+// or numbers (category N) are considered valid characters. They are mapped to
|
||
+// lower case, and included in the output. All other runes are considered
|
||
+// invalid characters. Invalid characters that precede the first valid character,
|
||
+// as well as invalid character that follow the last valid character
|
||
+// are dropped completely. All other sequences of invalid characters
|
||
+// between two valid characters are replaced with a single dash character '-'.
|
||
+//
|
||
+// SanitizedAnchorName exposes this functionality, and can be used to
|
||
+// create compatible links to the anchor names generated by blackfriday.
|
||
+// This algorithm is also implemented in a small standalone package at
|
||
+// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
|
||
+// that want a small package and don't need full functionality of blackfriday.
|
||
+package blackfriday
|
||
+
|
||
+// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
|
||
+// github.com/shurcooL/sanitized_anchor_name.
|
||
+// Otherwise, users of sanitized_anchor_name will get anchor names
|
||
+// that are incompatible with those generated by blackfriday.
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/entities.go b/vendor/github.com/russross/blackfriday/v2/entities.go
|
||
new file mode 100644
|
||
index 000000000000..a2c3edb691c8
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/russross/blackfriday/v2/entities.go
|
||
@@ -0,0 +1,2236 @@
|
||
+package blackfriday
|
||
+
|
||
+// Extracted from https://html.spec.whatwg.org/multipage/entities.json
|
||
+var entities = map[string]bool{
|
||
+ "Æ": true,
|
||
+ "Æ": true,
|
||
+ "&": true,
|
||
+ "&": true,
|
||
+ "Á": true,
|
||
+ "Á": true,
|
||
+ "Ă": true,
|
||
+ "Â": true,
|
||
+ "Â": true,
|
||
+ "А": true,
|
||
+ "𝔄": true,
|
||
+ "À": true,
|
||
+ "À": true,
|
||
+ "Α": true,
|
||
+ "Ā": true,
|
||
+ "⩓": true,
|
||
+ "Ą": true,
|
||
+ "𝔸": true,
|
||
+ "⁡": true,
|
||
+ "Å": true,
|
||
+ "Å": true,
|
||
+ "𝒜": true,
|
||
+ "≔": true,
|
||
+ "Ã": true,
|
||
+ "Ã": true,
|
||
+ "Ä": true,
|
||
+ "Ä": true,
|
||
+ "∖": true,
|
||
+ "⫧": true,
|
||
+ "⌆": true,
|
||
+ "Б": true,
|
||
+ "∵": true,
|
||
+ "ℬ": true,
|
||
+ "Β": true,
|
||
+ "𝔅": true,
|
||
+ "𝔹": true,
|
||
+ "˘": true,
|
||
+ "ℬ": true,
|
||
+ "≎": true,
|
||
+ "Ч": true,
|
||
+ "©": true,
|
||
+ "©": true,
|
||
+ "Ć": true,
|
||
+ "⋒": true,
|
||
+ "ⅅ": true,
|
||
+ "ℭ": true,
|
||
+ "Č": true,
|
||
+ "Ç": true,
|
||
+ "Ç": true,
|
||
+ "Ĉ": true,
|
||
+ "∰": true,
|
||
+ "Ċ": true,
|
||
+ "¸": true,
|
||
+ "·": true,
|
||
+ "ℭ": true,
|
||
+ "Χ": true,
|
||
+ "⊙": true,
|
||
+ "⊖": true,
|
||
+ "⊕": true,
|
||
+ "⊗": true,
|
||
+ "∲": true,
|
||
+ "”": true,
|
||
+ "’": true,
|
||
+ "∷": true,
|
||
+ "⩴": true,
|
||
+ "≡": true,
|
||
+ "∯": true,
|
||
+ "∮": true,
|
||
+ "ℂ": true,
|
||
+ "∐": true,
|
||
+ "∳": true,
|
||
+ "⨯": true,
|
||
+ "𝒞": true,
|
||
+ "⋓": true,
|
||
+ "≍": true,
|
||
+ "ⅅ": true,
|
||
+ "⤑": true,
|
||
+ "Ђ": true,
|
||
+ "Ѕ": true,
|
||
+ "Џ": true,
|
||
+ "‡": true,
|
||
+ "↡": true,
|
||
+ "⫤": true,
|
||
+ "Ď": true,
|
||
+ "Д": true,
|
||
+ "∇": true,
|
||
+ "Δ": true,
|
||
+ "𝔇": true,
|
||
+ "´": true,
|
||
+ "˙": true,
|
||
+ "˝": true,
|
||
+ "`": true,
|
||
+ "˜": true,
|
||
+ "⋄": true,
|
||
+ "ⅆ": true,
|
||
+ "𝔻": true,
|
||
+ "¨": true,
|
||
+ "⃜": true,
|
||
+ "≐": true,
|
||
+ "∯": true,
|
||
+ "¨": true,
|
||
+ "⇓": true,
|
||
+ "⇐": true,
|
||
+ "⇔": true,
|
||
+ "⫤": true,
|
||
+ "⟸": true,
|
||
+ "⟺": true,
|
||
+ "⟹": true,
|
||
+ "⇒": true,
|
||
+ "⊨": true,
|
||
+ "⇑": true,
|
||
+ "⇕": true,
|
||
+ "∥": true,
|
||
+ "↓": true,
|
||
+ "⤓": true,
|
||
+ "⇵": true,
|
||
+ "̑": true,
|
||
+ "⥐": true,
|
||
+ "⥞": true,
|
||
+ "↽": true,
|
||
+ "⥖": true,
|
||
+ "⥟": true,
|
||
+ "⇁": true,
|
||
+ "⥗": true,
|
||
+ "⊤": true,
|
||
+ "↧": true,
|
||
+ "⇓": true,
|
||
+ "𝒟": true,
|
||
+ "Đ": true,
|
||
+ "Ŋ": true,
|
||
+ "Ð": true,
|
||
+ "Ð": true,
|
||
+ "É": true,
|
||
+ "É": true,
|
||
+ "Ě": true,
|
||
+ "Ê": true,
|
||
+ "Ê": true,
|
||
+ "Э": true,
|
||
+ "Ė": true,
|
||
+ "𝔈": true,
|
||
+ "È": true,
|
||
+ "È": true,
|
||
+ "∈": true,
|
||
+ "Ē": true,
|
||
+ "◻": true,
|
||
+ "▫": true,
|
||
+ "Ę": true,
|
||
+ "𝔼": true,
|
||
+ "Ε": true,
|
||
+ "⩵": true,
|
||
+ "≂": true,
|
||
+ "⇌": true,
|
||
+ "ℰ": true,
|
||
+ "⩳": true,
|
||
+ "Η": true,
|
||
+ "Ë": true,
|
||
+ "Ë": true,
|
||
+ "∃": true,
|
||
+ "ⅇ": true,
|
||
+ "Ф": true,
|
||
+ "𝔉": true,
|
||
+ "◼": true,
|
||
+ "▪": true,
|
||
+ "𝔽": true,
|
||
+ "∀": true,
|
||
+ "ℱ": true,
|
||
+ "ℱ": true,
|
||
+ "Ѓ": true,
|
||
+ ">": true,
|
||
+ ">": true,
|
||
+ "Γ": true,
|
||
+ "Ϝ": true,
|
||
+ "Ğ": true,
|
||
+ "Ģ": true,
|
||
+ "Ĝ": true,
|
||
+ "Г": true,
|
||
+ "Ġ": true,
|
||
+ "𝔊": true,
|
||
+ "⋙": true,
|
||
+ "𝔾": true,
|
||
+ "≥": true,
|
||
+ "⋛": true,
|
||
+ "≧": true,
|
||
+ "⪢": true,
|
||
+ "≷": true,
|
||
+ "⩾": true,
|
||
+ "≳": true,
|
||
+ "𝒢": true,
|
||
+ "≫": true,
|
||
+ "Ъ": true,
|
||
+ "ˇ": true,
|
||
+ "^": true,
|
||
+ "Ĥ": true,
|
||
+ "ℌ": true,
|
||
+ "ℋ": true,
|
||
+ "ℍ": true,
|
||
+ "─": true,
|
||
+ "ℋ": true,
|
||
+ "Ħ": true,
|
||
+ "≎": true,
|
||
+ "≏": true,
|
||
+ "Е": true,
|
||
+ "IJ": true,
|
||
+ "Ё": true,
|
||
+ "Í": true,
|
||
+ "Í": true,
|
||
+ "Î": true,
|
||
+ "Î": true,
|
||
+ "И": true,
|
||
+ "İ": true,
|
||
+ "ℑ": true,
|
||
+ "Ì": true,
|
||
+ "Ì": true,
|
||
+ "ℑ": true,
|
||
+ "Ī": true,
|
||
+ "ⅈ": true,
|
||
+ "⇒": true,
|
||
+ "∬": true,
|
||
+ "∫": true,
|
||
+ "⋂": true,
|
||
+ "⁣": true,
|
||
+ "⁢": true,
|
||
+ "Į": true,
|
||
+ "𝕀": true,
|
||
+ "Ι": true,
|
||
+ "ℐ": true,
|
||
+ "Ĩ": true,
|
||
+ "І": true,
|
||
+ "Ï": true,
|
||
+ "Ï": true,
|
||
+ "Ĵ": true,
|
||
+ "Й": true,
|
||
+ "𝔍": true,
|
||
+ "𝕁": true,
|
||
+ "𝒥": true,
|
||
+ "Ј": true,
|
||
+ "Є": true,
|
||
+ "Х": true,
|
||
+ "Ќ": true,
|
||
+ "Κ": true,
|
||
+ "Ķ": true,
|
||
+ "К": true,
|
||
+ "𝔎": true,
|
||
+ "𝕂": true,
|
||
+ "𝒦": true,
|
||
+ "Љ": true,
|
||
+ "<": true,
|
||
+ "<": true,
|
||
+ "Ĺ": true,
|
||
+ "Λ": true,
|
||
+ "⟪": true,
|
||
+ "ℒ": true,
|
||
+ "↞": true,
|
||
+ "Ľ": true,
|
||
+ "Ļ": true,
|
||
+ "Л": true,
|
||
+ "⟨": true,
|
||
+ "←": true,
|
||
+ "⇤": true,
|
||
+ "⇆": true,
|
||
+ "⌈": true,
|
||
+ "⟦": true,
|
||
+ "⥡": true,
|
||
+ "⇃": true,
|
||
+ "⥙": true,
|
||
+ "⌊": true,
|
||
+ "↔": true,
|
||
+ "⥎": true,
|
||
+ "⊣": true,
|
||
+ "↤": true,
|
||
+ "⥚": true,
|
||
+ "⊲": true,
|
||
+ "⧏": true,
|
||
+ "⊴": true,
|
||
+ "⥑": true,
|
||
+ "⥠": true,
|
||
+ "↿": true,
|
||
+ "⥘": true,
|
||
+ "↼": true,
|
||
+ "⥒": true,
|
||
+ "⇐": true,
|
||
+ "⇔": true,
|
||
+ "⋚": true,
|
||
+ "≦": true,
|
||
+ "≶": true,
|
||
+ "⪡": true,
|
||
+ "⩽": true,
|
||
+ "≲": true,
|
||
+ "𝔏": true,
|
||
+ "⋘": true,
|
||
+ "⇚": true,
|
||
+ "Ŀ": true,
|
||
+ "⟵": true,
|
||
+ "⟷": true,
|
||
+ "⟶": true,
|
||
+ "⟸": true,
|
||
+ "⟺": true,
|
||
+ "⟹": true,
|
||
+ "𝕃": true,
|
||
+ "↙": true,
|
||
+ "↘": true,
|
||
+ "ℒ": true,
|
||
+ "↰": true,
|
||
+ "Ł": true,
|
||
+ "≪": true,
|
||
+ "⤅": true,
|
||
+ "М": true,
|
||
+ " ": true,
|
||
+ "ℳ": true,
|
||
+ "𝔐": true,
|
||
+ "∓": true,
|
||
+ "𝕄": true,
|
||
+ "ℳ": true,
|
||
+ "Μ": true,
|
||
+ "Њ": true,
|
||
+ "Ń": true,
|
||
+ "Ň": true,
|
||
+ "Ņ": true,
|
||
+ "Н": true,
|
||
+ "​": true,
|
||
+ "​": true,
|
||
+ "​": true,
|
||
+ "​": true,
|
||
+ "≫": true,
|
||
+ "≪": true,
|
||
+ "
": true,
|
||
+ "𝔑": true,
|
||
+ "⁠": true,
|
||
+ " ": true,
|
||
+ "ℕ": true,
|
||
+ "⫬": true,
|
||
+ "≢": true,
|
||
+ "≭": true,
|
||
+ "∦": true,
|
||
+ "∉": true,
|
||
+ "≠": true,
|
||
+ "≂̸": true,
|
||
+ "∄": true,
|
||
+ "≯": true,
|
||
+ "≱": true,
|
||
+ "≧̸": true,
|
||
+ "≫̸": true,
|
||
+ "≹": true,
|
||
+ "⩾̸": true,
|
||
+ "≵": true,
|
||
+ "≎̸": true,
|
||
+ "≏̸": true,
|
||
+ "⋪": true,
|
||
+ "⧏̸": true,
|
||
+ "⋬": true,
|
||
+ "≮": true,
|
||
+ "≰": true,
|
||
+ "≸": true,
|
||
+ "≪̸": true,
|
||
+ "⩽̸": true,
|
||
+ "≴": true,
|
||
+ "⪢̸": true,
|
||
+ "⪡̸": true,
|
||
+ "⊀": true,
|
||
+ "⪯̸": true,
|
||
+ "⋠": true,
|
||
+ "∌": true,
|
||
+ "⋫": true,
|
||
+ "⧐̸": true,
|
||
+ "⋭": true,
|
||
+ "⊏̸": true,
|
||
+ "⋢": true,
|
||
+ "⊐̸": true,
|
||
+ "⋣": true,
|
||
+ "⊂⃒": true,
|
||
+ "⊈": true,
|
||
+ "⊁": true,
|
||
+ "⪰̸": true,
|
||
+ "⋡": true,
|
||
+ "≿̸": true,
|
||
+ "⊃⃒": true,
|
||
+ "⊉": true,
|
||
+ "≁": true,
|
||
+ "≄": true,
|
||
+ "≇": true,
|
||
+ "≉": true,
|
||
+ "∤": true,
|
||
+ "𝒩": true,
|
||
+ "Ñ": true,
|
||
+ "Ñ": true,
|
||
+ "Ν": true,
|
||
+ "Œ": true,
|
||
+ "Ó": true,
|
||
+ "Ó": true,
|
||
+ "Ô": true,
|
||
+ "Ô": true,
|
||
+ "О": true,
|
||
+ "Ő": true,
|
||
+ "𝔒": true,
|
||
+ "Ò": true,
|
||
+ "Ò": true,
|
||
+ "Ō": true,
|
||
+ "Ω": true,
|
||
+ "Ο": true,
|
||
+ "𝕆": true,
|
||
+ "“": true,
|
||
+ "‘": true,
|
||
+ "⩔": true,
|
||
+ "𝒪": true,
|
||
+ "Ø": true,
|
||
+ "Ø": true,
|
||
+ "Õ": true,
|
||
+ "Õ": true,
|
||
+ "⨷": true,
|
||
+ "Ö": true,
|
||
+ "Ö": true,
|
||
+ "‾": true,
|
||
+ "⏞": true,
|
||
+ "⎴": true,
|
||
+ "⏜": true,
|
||
+ "∂": true,
|
||
+ "П": true,
|
||
+ "𝔓": true,
|
||
+ "Φ": true,
|
||
+ "Π": true,
|
||
+ "±": true,
|
||
+ "ℌ": true,
|
||
+ "ℙ": true,
|
||
+ "⪻": true,
|
||
+ "≺": true,
|
||
+ "⪯": true,
|
||
+ "≼": true,
|
||
+ "≾": true,
|
||
+ "″": true,
|
||
+ "∏": true,
|
||
+ "∷": true,
|
||
+ "∝": true,
|
||
+ "𝒫": true,
|
||
+ "Ψ": true,
|
||
+ """: true,
|
||
+ """: true,
|
||
+ "𝔔": true,
|
||
+ "ℚ": true,
|
||
+ "𝒬": true,
|
||
+ "⤐": true,
|
||
+ "®": true,
|
||
+ "®": true,
|
||
+ "Ŕ": true,
|
||
+ "⟫": true,
|
||
+ "↠": true,
|
||
+ "⤖": true,
|
||
+ "Ř": true,
|
||
+ "Ŗ": true,
|
||
+ "Р": true,
|
||
+ "ℜ": true,
|
||
+ "∋": true,
|
||
+ "⇋": true,
|
||
+ "⥯": true,
|
||
+ "ℜ": true,
|
||
+ "Ρ": true,
|
||
+ "⟩": true,
|
||
+ "→": true,
|
||
+ "⇥": true,
|
||
+ "⇄": true,
|
||
+ "⌉": true,
|
||
+ "⟧": true,
|
||
+ "⥝": true,
|
||
+ "⇂": true,
|
||
+ "⥕": true,
|
||
+ "⌋": true,
|
||
+ "⊢": true,
|
||
+ "↦": true,
|
||
+ "⥛": true,
|
||
+ "⊳": true,
|
||
+ "⧐": true,
|
||
+ "⊵": true,
|
||
+ "⥏": true,
|
||
+ "⥜": true,
|
||
+ "↾": true,
|
||
+ "⥔": true,
|
||
+ "⇀": true,
|
||
+ "⥓": true,
|
||
+ "⇒": true,
|
||
+ "ℝ": true,
|
||
+ "⥰": true,
|
||
+ "⇛": true,
|
||
+ "ℛ": true,
|
||
+ "↱": true,
|
||
+ "⧴": true,
|
||
+ "Щ": true,
|
||
+ "Ш": true,
|
||
+ "Ь": true,
|
||
+ "Ś": true,
|
||
+ "⪼": true,
|
||
+ "Š": true,
|
||
+ "Ş": true,
|
||
+ "Ŝ": true,
|
||
+ "С": true,
|
||
+ "𝔖": true,
|
||
+ "↓": true,
|
||
+ "←": true,
|
||
+ "→": true,
|
||
+ "↑": true,
|
||
+ "Σ": true,
|
||
+ "∘": true,
|
||
+ "𝕊": true,
|
||
+ "√": true,
|
||
+ "□": true,
|
||
+ "⊓": true,
|
||
+ "⊏": true,
|
||
+ "⊑": true,
|
||
+ "⊐": true,
|
||
+ "⊒": true,
|
||
+ "⊔": true,
|
||
+ "𝒮": true,
|
||
+ "⋆": true,
|
||
+ "⋐": true,
|
||
+ "⋐": true,
|
||
+ "⊆": true,
|
||
+ "≻": true,
|
||
+ "⪰": true,
|
||
+ "≽": true,
|
||
+ "≿": true,
|
||
+ "∋": true,
|
||
+ "∑": true,
|
||
+ "⋑": true,
|
||
+ "⊃": true,
|
||
+ "⊇": true,
|
||
+ "⋑": true,
|
||
+ "Þ": true,
|
||
+ "Þ": true,
|
||
+ "™": true,
|
||
+ "Ћ": true,
|
||
+ "Ц": true,
|
||
+ "	": true,
|
||
+ "Τ": true,
|
||
+ "Ť": true,
|
||
+ "Ţ": true,
|
||
+ "Т": true,
|
||
+ "𝔗": true,
|
||
+ "∴": true,
|
||
+ "Θ": true,
|
||
+ "  ": true,
|
||
+ " ": true,
|
||
+ "∼": true,
|
||
+ "≃": true,
|
||
+ "≅": true,
|
||
+ "≈": true,
|
||
+ "𝕋": true,
|
||
+ "⃛": true,
|
||
+ "𝒯": true,
|
||
+ "Ŧ": true,
|
||
+ "Ú": true,
|
||
+ "Ú": true,
|
||
+ "↟": true,
|
||
+ "⥉": true,
|
||
+ "Ў": true,
|
||
+ "Ŭ": true,
|
||
+ "Û": true,
|
||
+ "Û": true,
|
||
+ "У": true,
|
||
+ "Ű": true,
|
||
+ "𝔘": true,
|
||
+ "Ù": true,
|
||
+ "Ù": true,
|
||
+ "Ū": true,
|
||
+ "_": true,
|
||
+ "⏟": true,
|
||
+ "⎵": true,
|
||
+ "⏝": true,
|
||
+ "⋃": true,
|
||
+ "⊎": true,
|
||
+ "Ų": true,
|
||
+ "𝕌": true,
|
||
+ "↑": true,
|
||
+ "⤒": true,
|
||
+ "⇅": true,
|
||
+ "↕": true,
|
||
+ "⥮": true,
|
||
+ "⊥": true,
|
||
+ "↥": true,
|
||
+ "⇑": true,
|
||
+ "⇕": true,
|
||
+ "↖": true,
|
||
+ "↗": true,
|
||
+ "ϒ": true,
|
||
+ "Υ": true,
|
||
+ "Ů": true,
|
||
+ "𝒰": true,
|
||
+ "Ũ": true,
|
||
+ "Ü": true,
|
||
+ "Ü": true,
|
||
+ "⊫": true,
|
||
+ "⫫": true,
|
||
+ "В": true,
|
||
+ "⊩": true,
|
||
+ "⫦": true,
|
||
+ "⋁": true,
|
||
+ "‖": true,
|
||
+ "‖": true,
|
||
+ "∣": true,
|
||
+ "|": true,
|
||
+ "❘": true,
|
||
+ "≀": true,
|
||
+ " ": true,
|
||
+ "𝔙": true,
|
||
+ "𝕍": true,
|
||
+ "𝒱": true,
|
||
+ "⊪": true,
|
||
+ "Ŵ": true,
|
||
+ "⋀": true,
|
||
+ "𝔚": true,
|
||
+ "𝕎": true,
|
||
+ "𝒲": true,
|
||
+ "𝔛": true,
|
||
+ "Ξ": true,
|
||
+ "𝕏": true,
|
||
+ "𝒳": true,
|
||
+ "Я": true,
|
||
+ "Ї": true,
|
||
+ "Ю": true,
|
||
+ "Ý": true,
|
||
+ "Ý": true,
|
||
+ "Ŷ": true,
|
||
+ "Ы": true,
|
||
+ "𝔜": true,
|
||
+ "𝕐": true,
|
||
+ "𝒴": true,
|
||
+ "Ÿ": true,
|
||
+ "Ж": true,
|
||
+ "Ź": true,
|
||
+ "Ž": true,
|
||
+ "З": true,
|
||
+ "Ż": true,
|
||
+ "​": true,
|
||
+ "Ζ": true,
|
||
+ "ℨ": true,
|
||
+ "ℤ": true,
|
||
+ "𝒵": true,
|
||
+ "á": true,
|
||
+ "á": true,
|
||
+ "ă": true,
|
||
+ "∾": true,
|
||
+ "∾̳": true,
|
||
+ "∿": true,
|
||
+ "â": true,
|
||
+ "â": true,
|
||
+ "´": true,
|
||
+ "´": true,
|
||
+ "а": true,
|
||
+ "æ": true,
|
||
+ "æ": true,
|
||
+ "⁡": true,
|
||
+ "𝔞": true,
|
||
+ "à": true,
|
||
+ "à": true,
|
||
+ "ℵ": true,
|
||
+ "ℵ": true,
|
||
+ "α": true,
|
||
+ "ā": true,
|
||
+ "⨿": true,
|
||
+ "&": true,
|
||
+ "&": true,
|
||
+ "∧": true,
|
||
+ "⩕": true,
|
||
+ "⩜": true,
|
||
+ "⩘": true,
|
||
+ "⩚": true,
|
||
+ "∠": true,
|
||
+ "⦤": true,
|
||
+ "∠": true,
|
||
+ "∡": true,
|
||
+ "⦨": true,
|
||
+ "⦩": true,
|
||
+ "⦪": true,
|
||
+ "⦫": true,
|
||
+ "⦬": true,
|
||
+ "⦭": true,
|
||
+ "⦮": true,
|
||
+ "⦯": true,
|
||
+ "∟": true,
|
||
+ "⊾": true,
|
||
+ "⦝": true,
|
||
+ "∢": true,
|
||
+ "Å": true,
|
||
+ "⍼": true,
|
||
+ "ą": true,
|
||
+ "𝕒": true,
|
||
+ "≈": true,
|
||
+ "⩰": true,
|
||
+ "⩯": true,
|
||
+ "≊": true,
|
||
+ "≋": true,
|
||
+ "'": true,
|
||
+ "≈": true,
|
||
+ "≊": true,
|
||
+ "å": true,
|
||
+ "å": true,
|
||
+ "𝒶": true,
|
||
+ "*": true,
|
||
+ "≈": true,
|
||
+ "≍": true,
|
||
+ "ã": true,
|
||
+ "ã": true,
|
||
+ "ä": true,
|
||
+ "ä": true,
|
||
+ "∳": true,
|
||
+ "⨑": true,
|
||
+ "⫭": true,
|
||
+ "≌": true,
|
||
+ "϶": true,
|
||
+ "‵": true,
|
||
+ "∽": true,
|
||
+ "⋍": true,
|
||
+ "⊽": true,
|
||
+ "⌅": true,
|
||
+ "⌅": true,
|
||
+ "⎵": true,
|
||
+ "⎶": true,
|
||
+ "≌": true,
|
||
+ "б": true,
|
||
+ "„": true,
|
||
+ "∵": true,
|
||
+ "∵": true,
|
||
+ "⦰": true,
|
||
+ "϶": true,
|
||
+ "ℬ": true,
|
||
+ "β": true,
|
||
+ "ℶ": true,
|
||
+ "≬": true,
|
||
+ "𝔟": true,
|
||
+ "⋂": true,
|
||
+ "◯": true,
|
||
+ "⋃": true,
|
||
+ "⨀": true,
|
||
+ "⨁": true,
|
||
+ "⨂": true,
|
||
+ "⨆": true,
|
||
+ "★": true,
|
||
+ "▽": true,
|
||
+ "△": true,
|
||
+ "⨄": true,
|
||
+ "⋁": true,
|
||
+ "⋀": true,
|
||
+ "⤍": true,
|
||
+ "⧫": true,
|
||
+ "▪": true,
|
||
+ "▴": true,
|
||
+ "▾": true,
|
||
+ "◂": true,
|
||
+ "▸": true,
|
||
+ "␣": true,
|
||
+ "▒": true,
|
||
+ "░": true,
|
||
+ "▓": true,
|
||
+ "█": true,
|
||
+ "=⃥": true,
|
||
+ "≡⃥": true,
|
||
+ "⌐": true,
|
||
+ "𝕓": true,
|
||
+ "⊥": true,
|
||
+ "⊥": true,
|
||
+ "⋈": true,
|
||
+ "╗": true,
|
||
+ "╔": true,
|
||
+ "╖": true,
|
||
+ "╓": true,
|
||
+ "═": true,
|
||
+ "╦": true,
|
||
+ "╩": true,
|
||
+ "╤": true,
|
||
+ "╧": true,
|
||
+ "╝": true,
|
||
+ "╚": true,
|
||
+ "╜": true,
|
||
+ "╙": true,
|
||
+ "║": true,
|
||
+ "╬": true,
|
||
+ "╣": true,
|
||
+ "╠": true,
|
||
+ "╫": true,
|
||
+ "╢": true,
|
||
+ "╟": true,
|
||
+ "⧉": true,
|
||
+ "╕": true,
|
||
+ "╒": true,
|
||
+ "┐": true,
|
||
+ "┌": true,
|
||
+ "─": true,
|
||
+ "╥": true,
|
||
+ "╨": true,
|
||
+ "┬": true,
|
||
+ "┴": true,
|
||
+ "⊟": true,
|
||
+ "⊞": true,
|
||
+ "⊠": true,
|
||
+ "╛": true,
|
||
+ "╘": true,
|
||
+ "┘": true,
|
||
+ "└": true,
|
||
+ "│": true,
|
||
+ "╪": true,
|
||
+ "╡": true,
|
||
+ "╞": true,
|
||
+ "┼": true,
|
||
+ "┤": true,
|
||
+ "├": true,
|
||
+ "‵": true,
|
||
+ "˘": true,
|
||
+ "¦": true,
|
||
+ "¦": true,
|
||
+ "𝒷": true,
|
||
+ "⁏": true,
|
||
+ "∽": true,
|
||
+ "⋍": true,
|
||
+ "\": true,
|
||
+ "⧅": true,
|
||
+ "⟈": true,
|
||
+ "•": true,
|
||
+ "•": true,
|
||
+ "≎": true,
|
||
+ "⪮": true,
|
||
+ "≏": true,
|
||
+ "≏": true,
|
||
+ "ć": true,
|
||
+ "∩": true,
|
||
+ "⩄": true,
|
||
+ "⩉": true,
|
||
+ "⩋": true,
|
||
+ "⩇": true,
|
||
+ "⩀": true,
|
||
+ "∩︀": true,
|
||
+ "⁁": true,
|
||
+ "ˇ": true,
|
||
+ "⩍": true,
|
||
+ "č": true,
|
||
+ "ç": true,
|
||
+ "ç": true,
|
||
+ "ĉ": true,
|
||
+ "⩌": true,
|
||
+ "⩐": true,
|
||
+ "ċ": true,
|
||
+ "¸": true,
|
||
+ "¸": true,
|
||
+ "⦲": true,
|
||
+ "¢": true,
|
||
+ "¢": true,
|
||
+ "·": true,
|
||
+ "𝔠": true,
|
||
+ "ч": true,
|
||
+ "✓": true,
|
||
+ "✓": true,
|
||
+ "χ": true,
|
||
+ "○": true,
|
||
+ "⧃": true,
|
||
+ "ˆ": true,
|
||
+ "≗": true,
|
||
+ "↺": true,
|
||
+ "↻": true,
|
||
+ "®": true,
|
||
+ "Ⓢ": true,
|
||
+ "⊛": true,
|
||
+ "⊚": true,
|
||
+ "⊝": true,
|
||
+ "≗": true,
|
||
+ "⨐": true,
|
||
+ "⫯": true,
|
||
+ "⧂": true,
|
||
+ "♣": true,
|
||
+ "♣": true,
|
||
+ ":": true,
|
||
+ "≔": true,
|
||
+ "≔": true,
|
||
+ ",": true,
|
||
+ "@": true,
|
||
+ "∁": true,
|
||
+ "∘": true,
|
||
+ "∁": true,
|
||
+ "ℂ": true,
|
||
+ "≅": true,
|
||
+ "⩭": true,
|
||
+ "∮": true,
|
||
+ "𝕔": true,
|
||
+ "∐": true,
|
||
+ "©": true,
|
||
+ "©": true,
|
||
+ "℗": true,
|
||
+ "↵": true,
|
||
+ "✗": true,
|
||
+ "𝒸": true,
|
||
+ "⫏": true,
|
||
+ "⫑": true,
|
||
+ "⫐": true,
|
||
+ "⫒": true,
|
||
+ "⋯": true,
|
||
+ "⤸": true,
|
||
+ "⤵": true,
|
||
+ "⋞": true,
|
||
+ "⋟": true,
|
||
+ "↶": true,
|
||
+ "⤽": true,
|
||
+ "∪": true,
|
||
+ "⩈": true,
|
||
+ "⩆": true,
|
||
+ "⩊": true,
|
||
+ "⊍": true,
|
||
+ "⩅": true,
|
||
+ "∪︀": true,
|
||
+ "↷": true,
|
||
+ "⤼": true,
|
||
+ "⋞": true,
|
||
+ "⋟": true,
|
||
+ "⋎": true,
|
||
+ "⋏": true,
|
||
+ "¤": true,
|
||
+ "¤": true,
|
||
+ "↶": true,
|
||
+ "↷": true,
|
||
+ "⋎": true,
|
||
+ "⋏": true,
|
||
+ "∲": true,
|
||
+ "∱": true,
|
||
+ "⌭": true,
|
||
+ "⇓": true,
|
||
+ "⥥": true,
|
||
+ "†": true,
|
||
+ "ℸ": true,
|
||
+ "↓": true,
|
||
+ "‐": true,
|
||
+ "⊣": true,
|
||
+ "⤏": true,
|
||
+ "˝": true,
|
||
+ "ď": true,
|
||
+ "д": true,
|
||
+ "ⅆ": true,
|
||
+ "‡": true,
|
||
+ "⇊": true,
|
||
+ "⩷": true,
|
||
+ "°": true,
|
||
+ "°": true,
|
||
+ "δ": true,
|
||
+ "⦱": true,
|
||
+ "⥿": true,
|
||
+ "𝔡": true,
|
||
+ "⇃": true,
|
||
+ "⇂": true,
|
||
+ "⋄": true,
|
||
+ "⋄": true,
|
||
+ "♦": true,
|
||
+ "♦": true,
|
||
+ "¨": true,
|
||
+ "ϝ": true,
|
||
+ "⋲": true,
|
||
+ "÷": true,
|
||
+ "÷": true,
|
||
+ "÷": true,
|
||
+ "⋇": true,
|
||
+ "⋇": true,
|
||
+ "ђ": true,
|
||
+ "⌞": true,
|
||
+ "⌍": true,
|
||
+ "$": true,
|
||
+ "𝕕": true,
|
||
+ "˙": true,
|
||
+ "≐": true,
|
||
+ "≑": true,
|
||
+ "∸": true,
|
||
+ "∔": true,
|
||
+ "⊡": true,
|
||
+ "⌆": true,
|
||
+ "↓": true,
|
||
+ "⇊": true,
|
||
+ "⇃": true,
|
||
+ "⇂": true,
|
||
+ "⤐": true,
|
||
+ "⌟": true,
|
||
+ "⌌": true,
|
||
+ "𝒹": true,
|
||
+ "ѕ": true,
|
||
+ "⧶": true,
|
||
+ "đ": true,
|
||
+ "⋱": true,
|
||
+ "▿": true,
|
||
+ "▾": true,
|
||
+ "⇵": true,
|
||
+ "⥯": true,
|
||
+ "⦦": true,
|
||
+ "џ": true,
|
||
+ "⟿": true,
|
||
+ "⩷": true,
|
||
+ "≑": true,
|
||
+ "é": true,
|
||
+ "é": true,
|
||
+ "⩮": true,
|
||
+ "ě": true,
|
||
+ "≖": true,
|
||
+ "ê": true,
|
||
+ "ê": true,
|
||
+ "≕": true,
|
||
+ "э": true,
|
||
+ "ė": true,
|
||
+ "ⅇ": true,
|
||
+ "≒": true,
|
||
+ "𝔢": true,
|
||
+ "⪚": true,
|
||
+ "è": true,
|
||
+ "è": true,
|
||
+ "⪖": true,
|
||
+ "⪘": true,
|
||
+ "⪙": true,
|
||
+ "⏧": true,
|
||
+ "ℓ": true,
|
||
+ "⪕": true,
|
||
+ "⪗": true,
|
||
+ "ē": true,
|
||
+ "∅": true,
|
||
+ "∅": true,
|
||
+ "∅": true,
|
||
+ " ": true,
|
||
+ " ": true,
|
||
+ " ": true,
|
||
+ "ŋ": true,
|
||
+ " ": true,
|
||
+ "ę": true,
|
||
+ "𝕖": true,
|
||
+ "⋕": true,
|
||
+ "⧣": true,
|
||
+ "⩱": true,
|
||
+ "ε": true,
|
||
+ "ε": true,
|
||
+ "ϵ": true,
|
||
+ "≖": true,
|
||
+ "≕": true,
|
||
+ "≂": true,
|
||
+ "⪖": true,
|
||
+ "⪕": true,
|
||
+ "=": true,
|
||
+ "≟": true,
|
||
+ "≡": true,
|
||
+ "⩸": true,
|
||
+ "⧥": true,
|
||
+ "≓": true,
|
||
+ "⥱": true,
|
||
+ "ℯ": true,
|
||
+ "≐": true,
|
||
+ "≂": true,
|
||
+ "η": true,
|
||
+ "ð": true,
|
||
+ "ð": true,
|
||
+ "ë": true,
|
||
+ "ë": true,
|
||
+ "€": true,
|
||
+ "!": true,
|
||
+ "∃": true,
|
||
+ "ℰ": true,
|
||
+ "ⅇ": true,
|
||
+ "≒": true,
|
||
+ "ф": true,
|
||
+ "♀": true,
|
||
+ "ffi": true,
|
||
+ "ff": true,
|
||
+ "ffl": true,
|
||
+ "𝔣": true,
|
||
+ "fi": true,
|
||
+ "fj": true,
|
||
+ "♭": true,
|
||
+ "fl": true,
|
||
+ "▱": true,
|
||
+ "ƒ": true,
|
||
+ "𝕗": true,
|
||
+ "∀": true,
|
||
+ "⋔": true,
|
||
+ "⫙": true,
|
||
+ "⨍": true,
|
||
+ "½": true,
|
||
+ "½": true,
|
||
+ "⅓": true,
|
||
+ "¼": true,
|
||
+ "¼": true,
|
||
+ "⅕": true,
|
||
+ "⅙": true,
|
||
+ "⅛": true,
|
||
+ "⅔": true,
|
||
+ "⅖": true,
|
||
+ "¾": true,
|
||
+ "¾": true,
|
||
+ "⅗": true,
|
||
+ "⅜": true,
|
||
+ "⅘": true,
|
||
+ "⅚": true,
|
||
+ "⅝": true,
|
||
+ "⅞": true,
|
||
+ "⁄": true,
|
||
+ "⌢": true,
|
||
+ "𝒻": true,
|
||
+ "≧": true,
|
||
+ "⪌": true,
|
||
+ "ǵ": true,
|
||
+ "γ": true,
|
||
+ "ϝ": true,
|
||
+ "⪆": true,
|
||
+ "ğ": true,
|
||
+ "ĝ": true,
|
||
+ "г": true,
|
||
+ "ġ": true,
|
||
+ "≥": true,
|
||
+ "⋛": true,
|
||
+ "≥": true,
|
||
+ "≧": true,
|
||
+ "⩾": true,
|
||
+ "⩾": true,
|
||
+ "⪩": true,
|
||
+ "⪀": true,
|
||
+ "⪂": true,
|
||
+ "⪄": true,
|
||
+ "⋛︀": true,
|
||
+ "⪔": true,
|
||
+ "𝔤": true,
|
||
+ "≫": true,
|
||
+ "⋙": true,
|
||
+ "ℷ": true,
|
||
+ "ѓ": true,
|
||
+ "≷": true,
|
||
+ "⪒": true,
|
||
+ "⪥": true,
|
||
+ "⪤": true,
|
||
+ "≩": true,
|
||
+ "⪊": true,
|
||
+ "⪊": true,
|
||
+ "⪈": true,
|
||
+ "⪈": true,
|
||
+ "≩": true,
|
||
+ "⋧": true,
|
||
+ "𝕘": true,
|
||
+ "`": true,
|
||
+ "ℊ": true,
|
||
+ "≳": true,
|
||
+ "⪎": true,
|
||
+ "⪐": true,
|
||
+ ">": true,
|
||
+ ">": true,
|
||
+ "⪧": true,
|
||
+ "⩺": true,
|
||
+ "⋗": true,
|
||
+ "⦕": true,
|
||
+ "⩼": true,
|
||
+ "⪆": true,
|
||
+ "⥸": true,
|
||
+ "⋗": true,
|
||
+ "⋛": true,
|
||
+ "⪌": true,
|
||
+ "≷": true,
|
||
+ "≳": true,
|
||
+ "≩︀": true,
|
||
+ "≩︀": true,
|
||
+ "⇔": true,
|
||
+ " ": true,
|
||
+ "½": true,
|
||
+ "ℋ": true,
|
||
+ "ъ": true,
|
||
+ "↔": true,
|
||
+ "⥈": true,
|
||
+ "↭": true,
|
||
+ "ℏ": true,
|
||
+ "ĥ": true,
|
||
+ "♥": true,
|
||
+ "♥": true,
|
||
+ "…": true,
|
||
+ "⊹": true,
|
||
+ "𝔥": true,
|
||
+ "⤥": true,
|
||
+ "⤦": true,
|
||
+ "⇿": true,
|
||
+ "∻": true,
|
||
+ "↩": true,
|
||
+ "↪": true,
|
||
+ "𝕙": true,
|
||
+ "―": true,
|
||
+ "𝒽": true,
|
||
+ "ℏ": true,
|
||
+ "ħ": true,
|
||
+ "⁃": true,
|
||
+ "‐": true,
|
||
+ "í": true,
|
||
+ "í": true,
|
||
+ "⁣": true,
|
||
+ "î": true,
|
||
+ "î": true,
|
||
+ "и": true,
|
||
+ "е": true,
|
||
+ "¡": true,
|
||
+ "¡": true,
|
||
+ "⇔": true,
|
||
+ "𝔦": true,
|
||
+ "ì": true,
|
||
+ "ì": true,
|
||
+ "ⅈ": true,
|
||
+ "⨌": true,
|
||
+ "∭": true,
|
||
+ "⧜": true,
|
||
+ "℩": true,
|
||
+ "ij": true,
|
||
+ "ī": true,
|
||
+ "ℑ": true,
|
||
+ "ℐ": true,
|
||
+ "ℑ": true,
|
||
+ "ı": true,
|
||
+ "⊷": true,
|
||
+ "Ƶ": true,
|
||
+ "∈": true,
|
||
+ "℅": true,
|
||
+ "∞": true,
|
||
+ "⧝": true,
|
||
+ "ı": true,
|
||
+ "∫": true,
|
||
+ "⊺": true,
|
||
+ "ℤ": true,
|
||
+ "⊺": true,
|
||
+ "⨗": true,
|
||
+ "⨼": true,
|
||
+ "ё": true,
|
||
+ "į": true,
|
||
+ "𝕚": true,
|
||
+ "ι": true,
|
||
+ "⨼": true,
|
||
+ "¿": true,
|
||
+ "¿": true,
|
||
+ "𝒾": true,
|
||
+ "∈": true,
|
||
+ "⋹": true,
|
||
+ "⋵": true,
|
||
+ "⋴": true,
|
||
+ "⋳": true,
|
||
+ "∈": true,
|
||
+ "⁢": true,
|
||
+ "ĩ": true,
|
||
+ "і": true,
|
||
+ "ï": true,
|
||
+ "ï": true,
|
||
+ "ĵ": true,
|
||
+ "й": true,
|
||
+ "𝔧": true,
|
||
+ "ȷ": true,
|
||
+ "𝕛": true,
|
||
+ "𝒿": true,
|
||
+ "ј": true,
|
||
+ "є": true,
|
||
+ "κ": true,
|
||
+ "ϰ": true,
|
||
+ "ķ": true,
|
||
+ "к": true,
|
||
+ "𝔨": true,
|
||
+ "ĸ": true,
|
||
+ "х": true,
|
||
+ "ќ": true,
|
||
+ "𝕜": true,
|
||
+ "𝓀": true,
|
||
+ "⇚": true,
|
||
+ "⇐": true,
|
||
+ "⤛": true,
|
||
+ "⤎": true,
|
||
+ "≦": true,
|
||
+ "⪋": true,
|
||
+ "⥢": true,
|
||
+ "ĺ": true,
|
||
+ "⦴": true,
|
||
+ "ℒ": true,
|
||
+ "λ": true,
|
||
+ "⟨": true,
|
||
+ "⦑": true,
|
||
+ "⟨": true,
|
||
+ "⪅": true,
|
||
+ "«": true,
|
||
+ "«": true,
|
||
+ "←": true,
|
||
+ "⇤": true,
|
||
+ "⤟": true,
|
||
+ "⤝": true,
|
||
+ "↩": true,
|
||
+ "↫": true,
|
||
+ "⤹": true,
|
||
+ "⥳": true,
|
||
+ "↢": true,
|
||
+ "⪫": true,
|
||
+ "⤙": true,
|
||
+ "⪭": true,
|
||
+ "⪭︀": true,
|
||
+ "⤌": true,
|
||
+ "❲": true,
|
||
+ "{": true,
|
||
+ "[": true,
|
||
+ "⦋": true,
|
||
+ "⦏": true,
|
||
+ "⦍": true,
|
||
+ "ľ": true,
|
||
+ "ļ": true,
|
||
+ "⌈": true,
|
||
+ "{": true,
|
||
+ "л": true,
|
||
+ "⤶": true,
|
||
+ "“": true,
|
||
+ "„": true,
|
||
+ "⥧": true,
|
||
+ "⥋": true,
|
||
+ "↲": true,
|
||
+ "≤": true,
|
||
+ "←": true,
|
||
+ "↢": true,
|
||
+ "↽": true,
|
||
+ "↼": true,
|
||
+ "⇇": true,
|
||
+ "↔": true,
|
||
+ "⇆": true,
|
||
+ "⇋": true,
|
||
+ "↭": true,
|
||
+ "⋋": true,
|
||
+ "⋚": true,
|
||
+ "≤": true,
|
||
+ "≦": true,
|
||
+ "⩽": true,
|
||
+ "⩽": true,
|
||
+ "⪨": true,
|
||
+ "⩿": true,
|
||
+ "⪁": true,
|
||
+ "⪃": true,
|
||
+ "⋚︀": true,
|
||
+ "⪓": true,
|
||
+ "⪅": true,
|
||
+ "⋖": true,
|
||
+ "⋚": true,
|
||
+ "⪋": true,
|
||
+ "≶": true,
|
||
+ "≲": true,
|
||
+ "⥼": true,
|
||
+ "⌊": true,
|
||
+ "𝔩": true,
|
||
+ "≶": true,
|
||
+ "⪑": true,
|
||
+ "↽": true,
|
||
+ "↼": true,
|
||
+ "⥪": true,
|
||
+ "▄": true,
|
||
+ "љ": true,
|
||
+ "≪": true,
|
||
+ "⇇": true,
|
||
+ "⌞": true,
|
||
+ "⥫": true,
|
||
+ "◺": true,
|
||
+ "ŀ": true,
|
||
+ "⎰": true,
|
||
+ "⎰": true,
|
||
+ "≨": true,
|
||
+ "⪉": true,
|
||
+ "⪉": true,
|
||
+ "⪇": true,
|
||
+ "⪇": true,
|
||
+ "≨": true,
|
||
+ "⋦": true,
|
||
+ "⟬": true,
|
||
+ "⇽": true,
|
||
+ "⟦": true,
|
||
+ "⟵": true,
|
||
+ "⟷": true,
|
||
+ "⟼": true,
|
||
+ "⟶": true,
|
||
+ "↫": true,
|
||
+ "↬": true,
|
||
+ "⦅": true,
|
||
+ "𝕝": true,
|
||
+ "⨭": true,
|
||
+ "⨴": true,
|
||
+ "∗": true,
|
||
+ "_": true,
|
||
+ "◊": true,
|
||
+ "◊": true,
|
||
+ "⧫": true,
|
||
+ "(": true,
|
||
+ "⦓": true,
|
||
+ "⇆": true,
|
||
+ "⌟": true,
|
||
+ "⇋": true,
|
||
+ "⥭": true,
|
||
+ "‎": true,
|
||
+ "⊿": true,
|
||
+ "‹": true,
|
||
+ "𝓁": true,
|
||
+ "↰": true,
|
||
+ "≲": true,
|
||
+ "⪍": true,
|
||
+ "⪏": true,
|
||
+ "[": true,
|
||
+ "‘": true,
|
||
+ "‚": true,
|
||
+ "ł": true,
|
||
+ "<": true,
|
||
+ "<": true,
|
||
+ "⪦": true,
|
||
+ "⩹": true,
|
||
+ "⋖": true,
|
||
+ "⋋": true,
|
||
+ "⋉": true,
|
||
+ "⥶": true,
|
||
+ "⩻": true,
|
||
+ "⦖": true,
|
||
+ "◃": true,
|
||
+ "⊴": true,
|
||
+ "◂": true,
|
||
+ "⥊": true,
|
||
+ "⥦": true,
|
||
+ "≨︀": true,
|
||
+ "≨︀": true,
|
||
+ "∺": true,
|
||
+ "¯": true,
|
||
+ "¯": true,
|
||
+ "♂": true,
|
||
+ "✠": true,
|
||
+ "✠": true,
|
||
+ "↦": true,
|
||
+ "↦": true,
|
||
+ "↧": true,
|
||
+ "↤": true,
|
||
+ "↥": true,
|
||
+ "▮": true,
|
||
+ "⨩": true,
|
||
+ "м": true,
|
||
+ "—": true,
|
||
+ "∡": true,
|
||
+ "𝔪": true,
|
||
+ "℧": true,
|
||
+ "µ": true,
|
||
+ "µ": true,
|
||
+ "∣": true,
|
||
+ "*": true,
|
||
+ "⫰": true,
|
||
+ "·": true,
|
||
+ "·": true,
|
||
+ "−": true,
|
||
+ "⊟": true,
|
||
+ "∸": true,
|
||
+ "⨪": true,
|
||
+ "⫛": true,
|
||
+ "…": true,
|
||
+ "∓": true,
|
||
+ "⊧": true,
|
||
+ "𝕞": true,
|
||
+ "∓": true,
|
||
+ "𝓂": true,
|
||
+ "∾": true,
|
||
+ "μ": true,
|
||
+ "⊸": true,
|
||
+ "⊸": true,
|
||
+ "⋙̸": true,
|
||
+ "≫⃒": true,
|
||
+ "≫̸": true,
|
||
+ "⇍": true,
|
||
+ "⇎": true,
|
||
+ "⋘̸": true,
|
||
+ "≪⃒": true,
|
||
+ "≪̸": true,
|
||
+ "⇏": true,
|
||
+ "⊯": true,
|
||
+ "⊮": true,
|
||
+ "∇": true,
|
||
+ "ń": true,
|
||
+ "∠⃒": true,
|
||
+ "≉": true,
|
||
+ "⩰̸": true,
|
||
+ "≋̸": true,
|
||
+ "ʼn": true,
|
||
+ "≉": true,
|
||
+ "♮": true,
|
||
+ "♮": true,
|
||
+ "ℕ": true,
|
||
+ " ": true,
|
||
+ " ": true,
|
||
+ "≎̸": true,
|
||
+ "≏̸": true,
|
||
+ "⩃": true,
|
||
+ "ň": true,
|
||
+ "ņ": true,
|
||
+ "≇": true,
|
||
+ "⩭̸": true,
|
||
+ "⩂": true,
|
||
+ "н": true,
|
||
+ "–": true,
|
||
+ "≠": true,
|
||
+ "⇗": true,
|
||
+ "⤤": true,
|
||
+ "↗": true,
|
||
+ "↗": true,
|
||
+ "≐̸": true,
|
||
+ "≢": true,
|
||
+ "⤨": true,
|
||
+ "≂̸": true,
|
||
+ "∄": true,
|
||
+ "∄": true,
|
||
+ "𝔫": true,
|
||
+ "≧̸": true,
|
||
+ "≱": true,
|
||
+ "≱": true,
|
||
+ "≧̸": true,
|
||
+ "⩾̸": true,
|
||
+ "⩾̸": true,
|
||
+ "≵": true,
|
||
+ "≯": true,
|
||
+ "≯": true,
|
||
+ "⇎": true,
|
||
+ "↮": true,
|
||
+ "⫲": true,
|
||
+ "∋": true,
|
||
+ "⋼": true,
|
||
+ "⋺": true,
|
||
+ "∋": true,
|
||
+ "њ": true,
|
||
+ "⇍": true,
|
||
+ "≦̸": true,
|
||
+ "↚": true,
|
||
+ "‥": true,
|
||
+ "≰": true,
|
||
+ "↚": true,
|
||
+ "↮": true,
|
||
+ "≰": true,
|
||
+ "≦̸": true,
|
||
+ "⩽̸": true,
|
||
+ "⩽̸": true,
|
||
+ "≮": true,
|
||
+ "≴": true,
|
||
+ "≮": true,
|
||
+ "⋪": true,
|
||
+ "⋬": true,
|
||
+ "∤": true,
|
||
+ "𝕟": true,
|
||
+ "¬": true,
|
||
+ "¬": true,
|
||
+ "∉": true,
|
||
+ "⋹̸": true,
|
||
+ "⋵̸": true,
|
||
+ "∉": true,
|
||
+ "⋷": true,
|
||
+ "⋶": true,
|
||
+ "∌": true,
|
||
+ "∌": true,
|
||
+ "⋾": true,
|
||
+ "⋽": true,
|
||
+ "∦": true,
|
||
+ "∦": true,
|
||
+ "⫽⃥": true,
|
||
+ "∂̸": true,
|
||
+ "⨔": true,
|
||
+ "⊀": true,
|
||
+ "⋠": true,
|
||
+ "⪯̸": true,
|
||
+ "⊀": true,
|
||
+ "⪯̸": true,
|
||
+ "⇏": true,
|
||
+ "↛": true,
|
||
+ "⤳̸": true,
|
||
+ "↝̸": true,
|
||
+ "↛": true,
|
||
+ "⋫": true,
|
||
+ "⋭": true,
|
||
+ "⊁": true,
|
||
+ "⋡": true,
|
||
+ "⪰̸": true,
|
||
+ "𝓃": true,
|
||
+ "∤": true,
|
||
+ "∦": true,
|
||
+ "≁": true,
|
||
+ "≄": true,
|
||
+ "≄": true,
|
||
+ "∤": true,
|
||
+ "∦": true,
|
||
+ "⋢": true,
|
||
+ "⋣": true,
|
||
+ "⊄": true,
|
||
+ "⫅̸": true,
|
||
+ "⊈": true,
|
||
+ "⊂⃒": true,
|
||
+ "⊈": true,
|
||
+ "⫅̸": true,
|
||
+ "⊁": true,
|
||
+ "⪰̸": true,
|
||
+ "⊅": true,
|
||
+ "⫆̸": true,
|
||
+ "⊉": true,
|
||
+ "⊃⃒": true,
|
||
+ "⊉": true,
|
||
+ "⫆̸": true,
|
||
+ "≹": true,
|
||
+ "ñ": true,
|
||
+ "ñ": true,
|
||
+ "≸": true,
|
||
+ "⋪": true,
|
||
+ "⋬": true,
|
||
+ "⋫": true,
|
||
+ "⋭": true,
|
||
+ "ν": true,
|
||
+ "#": true,
|
||
+ "№": true,
|
||
+ " ": true,
|
||
+ "⊭": true,
|
||
+ "⤄": true,
|
||
+ "≍⃒": true,
|
||
+ "⊬": true,
|
||
+ "≥⃒": true,
|
||
+ ">⃒": true,
|
||
+ "⧞": true,
|
||
+ "⤂": true,
|
||
+ "≤⃒": true,
|
||
+ "<⃒": true,
|
||
+ "⊴⃒": true,
|
||
+ "⤃": true,
|
||
+ "⊵⃒": true,
|
||
+ "∼⃒": true,
|
||
+ "⇖": true,
|
||
+ "⤣": true,
|
||
+ "↖": true,
|
||
+ "↖": true,
|
||
+ "⤧": true,
|
||
+ "Ⓢ": true,
|
||
+ "ó": true,
|
||
+ "ó": true,
|
||
+ "⊛": true,
|
||
+ "⊚": true,
|
||
+ "ô": true,
|
||
+ "ô": true,
|
||
+ "о": true,
|
||
+ "⊝": true,
|
||
+ "ő": true,
|
||
+ "⨸": true,
|
||
+ "⊙": true,
|
||
+ "⦼": true,
|
||
+ "œ": true,
|
||
+ "⦿": true,
|
||
+ "𝔬": true,
|
||
+ "˛": true,
|
||
+ "ò": true,
|
||
+ "ò": true,
|
||
+ "⧁": true,
|
||
+ "⦵": true,
|
||
+ "Ω": true,
|
||
+ "∮": true,
|
||
+ "↺": true,
|
||
+ "⦾": true,
|
||
+ "⦻": true,
|
||
+ "‾": true,
|
||
+ "⧀": true,
|
||
+ "ō": true,
|
||
+ "ω": true,
|
||
+ "ο": true,
|
||
+ "⦶": true,
|
||
+ "⊖": true,
|
||
+ "𝕠": true,
|
||
+ "⦷": true,
|
||
+ "⦹": true,
|
||
+ "⊕": true,
|
||
+ "∨": true,
|
||
+ "↻": true,
|
||
+ "⩝": true,
|
||
+ "ℴ": true,
|
||
+ "ℴ": true,
|
||
+ "ª": true,
|
||
+ "ª": true,
|
||
+ "º": true,
|
||
+ "º": true,
|
||
+ "⊶": true,
|
||
+ "⩖": true,
|
||
+ "⩗": true,
|
||
+ "⩛": true,
|
||
+ "ℴ": true,
|
||
+ "ø": true,
|
||
+ "ø": true,
|
||
+ "⊘": true,
|
||
+ "õ": true,
|
||
+ "õ": true,
|
||
+ "⊗": true,
|
||
+ "⨶": true,
|
||
+ "ö": true,
|
||
+ "ö": true,
|
||
+ "⌽": true,
|
||
+ "∥": true,
|
||
+ "¶": true,
|
||
+ "¶": true,
|
||
+ "∥": true,
|
||
+ "⫳": true,
|
||
+ "⫽": true,
|
||
+ "∂": true,
|
||
+ "п": true,
|
||
+ "%": true,
|
||
+ ".": true,
|
||
+ "‰": true,
|
||
+ "⊥": true,
|
||
+ "‱": true,
|
||
+ "𝔭": true,
|
||
+ "φ": true,
|
||
+ "ϕ": true,
|
||
+ "ℳ": true,
|
||
+ "☎": true,
|
||
+ "π": true,
|
||
+ "⋔": true,
|
||
+ "ϖ": true,
|
||
+ "ℏ": true,
|
||
+ "ℎ": true,
|
||
+ "ℏ": true,
|
||
+ "+": true,
|
||
+ "⨣": true,
|
||
+ "⊞": true,
|
||
+ "⨢": true,
|
||
+ "∔": true,
|
||
+ "⨥": true,
|
||
+ "⩲": true,
|
||
+ "±": true,
|
||
+ "±": true,
|
||
+ "⨦": true,
|
||
+ "⨧": true,
|
||
+ "±": true,
|
||
+ "⨕": true,
|
||
+ "𝕡": true,
|
||
+ "£": true,
|
||
+ "£": true,
|
||
+ "≺": true,
|
||
+ "⪳": true,
|
||
+ "⪷": true,
|
||
+ "≼": true,
|
||
+ "⪯": true,
|
||
+ "≺": true,
|
||
+ "⪷": true,
|
||
+ "≼": true,
|
||
+ "⪯": true,
|
||
+ "⪹": true,
|
||
+ "⪵": true,
|
||
+ "⋨": true,
|
||
+ "≾": true,
|
||
+ "′": true,
|
||
+ "ℙ": true,
|
||
+ "⪵": true,
|
||
+ "⪹": true,
|
||
+ "⋨": true,
|
||
+ "∏": true,
|
||
+ "⌮": true,
|
||
+ "⌒": true,
|
||
+ "⌓": true,
|
||
+ "∝": true,
|
||
+ "∝": true,
|
||
+ "≾": true,
|
||
+ "⊰": true,
|
||
+ "𝓅": true,
|
||
+ "ψ": true,
|
||
+ " ": true,
|
||
+ "𝔮": true,
|
||
+ "⨌": true,
|
||
+ "𝕢": true,
|
||
+ "⁗": true,
|
||
+ "𝓆": true,
|
||
+ "ℍ": true,
|
||
+ "⨖": true,
|
||
+ "?": true,
|
||
+ "≟": true,
|
||
+ """: true,
|
||
+ """: true,
|
||
+ "⇛": true,
|
||
+ "⇒": true,
|
||
+ "⤜": true,
|
||
+ "⤏": true,
|
||
+ "⥤": true,
|
||
+ "∽̱": true,
|
||
+ "ŕ": true,
|
||
+ "√": true,
|
||
+ "⦳": true,
|
||
+ "⟩": true,
|
||
+ "⦒": true,
|
||
+ "⦥": true,
|
||
+ "⟩": true,
|
||
+ "»": true,
|
||
+ "»": true,
|
||
+ "→": true,
|
||
+ "⥵": true,
|
||
+ "⇥": true,
|
||
+ "⤠": true,
|
||
+ "⤳": true,
|
||
+ "⤞": true,
|
||
+ "↪": true,
|
||
+ "↬": true,
|
||
+ "⥅": true,
|
||
+ "⥴": true,
|
||
+ "↣": true,
|
||
+ "↝": true,
|
||
+ "⤚": true,
|
||
+ "∶": true,
|
||
+ "ℚ": true,
|
||
+ "⤍": true,
|
||
+ "❳": true,
|
||
+ "}": true,
|
||
+ "]": true,
|
||
+ "⦌": true,
|
||
+ "⦎": true,
|
||
+ "⦐": true,
|
||
+ "ř": true,
|
||
+ "ŗ": true,
|
||
+ "⌉": true,
|
||
+ "}": true,
|
||
+ "р": true,
|
||
+ "⤷": true,
|
||
+ "⥩": true,
|
||
+ "”": true,
|
||
+ "”": true,
|
||
+ "↳": true,
|
||
+ "ℜ": true,
|
||
+ "ℛ": true,
|
||
+ "ℜ": true,
|
||
+ "ℝ": true,
|
||
+ "▭": true,
|
||
+ "®": true,
|
||
+ "®": true,
|
||
+ "⥽": true,
|
||
+ "⌋": true,
|
||
+ "𝔯": true,
|
||
+ "⇁": true,
|
||
+ "⇀": true,
|
||
+ "⥬": true,
|
||
+ "ρ": true,
|
||
+ "ϱ": true,
|
||
+ "→": true,
|
||
+ "↣": true,
|
||
+ "⇁": true,
|
||
+ "⇀": true,
|
||
+ "⇄": true,
|
||
+ "⇌": true,
|
||
+ "⇉": true,
|
||
+ "↝": true,
|
||
+ "⋌": true,
|
||
+ "˚": true,
|
||
+ "≓": true,
|
||
+ "⇄": true,
|
||
+ "⇌": true,
|
||
+ "‏": true,
|
||
+ "⎱": true,
|
||
+ "⎱": true,
|
||
+ "⫮": true,
|
||
+ "⟭": true,
|
||
+ "⇾": true,
|
||
+ "⟧": true,
|
||
+ "⦆": true,
|
||
+ "𝕣": true,
|
||
+ "⨮": true,
|
||
+ "⨵": true,
|
||
+ ")": true,
|
||
+ "⦔": true,
|
||
+ "⨒": true,
|
||
+ "⇉": true,
|
||
+ "›": true,
|
||
+ "𝓇": true,
|
||
+ "↱": true,
|
||
+ "]": true,
|
||
+ "’": true,
|
||
+ "’": true,
|
||
+ "⋌": true,
|
||
+ "⋊": true,
|
||
+ "▹": true,
|
||
+ "⊵": true,
|
||
+ "▸": true,
|
||
+ "⧎": true,
|
||
+ "⥨": true,
|
||
+ "℞": true,
|
||
+ "ś": true,
|
||
+ "‚": true,
|
||
+ "≻": true,
|
||
+ "⪴": true,
|
||
+ "⪸": true,
|
||
+ "š": true,
|
||
+ "≽": true,
|
||
+ "⪰": true,
|
||
+ "ş": true,
|
||
+ "ŝ": true,
|
||
+ "⪶": true,
|
||
+ "⪺": true,
|
||
+ "⋩": true,
|
||
+ "⨓": true,
|
||
+ "≿": true,
|
||
+ "с": true,
|
||
+ "⋅": true,
|
||
+ "⊡": true,
|
||
+ "⩦": true,
|
||
+ "⇘": true,
|
||
+ "⤥": true,
|
||
+ "↘": true,
|
||
+ "↘": true,
|
||
+ "§": true,
|
||
+ "§": true,
|
||
+ ";": true,
|
||
+ "⤩": true,
|
||
+ "∖": true,
|
||
+ "∖": true,
|
||
+ "✶": true,
|
||
+ "𝔰": true,
|
||
+ "⌢": true,
|
||
+ "♯": true,
|
||
+ "щ": true,
|
||
+ "ш": true,
|
||
+ "∣": true,
|
||
+ "∥": true,
|
||
+ "­": true,
|
||
+ "­": true,
|
||
+ "σ": true,
|
||
+ "ς": true,
|
||
+ "ς": true,
|
||
+ "∼": true,
|
||
+ "⩪": true,
|
||
+ "≃": true,
|
||
+ "≃": true,
|
||
+ "⪞": true,
|
||
+ "⪠": true,
|
||
+ "⪝": true,
|
||
+ "⪟": true,
|
||
+ "≆": true,
|
||
+ "⨤": true,
|
||
+ "⥲": true,
|
||
+ "←": true,
|
||
+ "∖": true,
|
||
+ "⨳": true,
|
||
+ "⧤": true,
|
||
+ "∣": true,
|
||
+ "⌣": true,
|
||
+ "⪪": true,
|
||
+ "⪬": true,
|
||
+ "⪬︀": true,
|
||
+ "ь": true,
|
||
+ "/": true,
|
||
+ "⧄": true,
|
||
+ "⌿": true,
|
||
+ "𝕤": true,
|
||
+ "♠": true,
|
||
+ "♠": true,
|
||
+ "∥": true,
|
||
+ "⊓": true,
|
||
+ "⊓︀": true,
|
||
+ "⊔": true,
|
||
+ "⊔︀": true,
|
||
+ "⊏": true,
|
||
+ "⊑": true,
|
||
+ "⊏": true,
|
||
+ "⊑": true,
|
||
+ "⊐": true,
|
||
+ "⊒": true,
|
||
+ "⊐": true,
|
||
+ "⊒": true,
|
||
+ "□": true,
|
||
+ "□": true,
|
||
+ "▪": true,
|
||
+ "▪": true,
|
||
+ "→": true,
|
||
+ "𝓈": true,
|
||
+ "∖": true,
|
||
+ "⌣": true,
|
||
+ "⋆": true,
|
||
+ "☆": true,
|
||
+ "★": true,
|
||
+ "ϵ": true,
|
||
+ "ϕ": true,
|
||
+ "¯": true,
|
||
+ "⊂": true,
|
||
+ "⫅": true,
|
||
+ "⪽": true,
|
||
+ "⊆": true,
|
||
+ "⫃": true,
|
||
+ "⫁": true,
|
||
+ "⫋": true,
|
||
+ "⊊": true,
|
||
+ "⪿": true,
|
||
+ "⥹": true,
|
||
+ "⊂": true,
|
||
+ "⊆": true,
|
||
+ "⫅": true,
|
||
+ "⊊": true,
|
||
+ "⫋": true,
|
||
+ "⫇": true,
|
||
+ "⫕": true,
|
||
+ "⫓": true,
|
||
+ "≻": true,
|
||
+ "⪸": true,
|
||
+ "≽": true,
|
||
+ "⪰": true,
|
||
+ "⪺": true,
|
||
+ "⪶": true,
|
||
+ "⋩": true,
|
||
+ "≿": true,
|
||
+ "∑": true,
|
||
+ "♪": true,
|
||
+ "¹": true,
|
||
+ "¹": true,
|
||
+ "²": true,
|
||
+ "²": true,
|
||
+ "³": true,
|
||
+ "³": true,
|
||
+ "⊃": true,
|
||
+ "⫆": true,
|
||
+ "⪾": true,
|
||
+ "⫘": true,
|
||
+ "⊇": true,
|
||
+ "⫄": true,
|
||
+ "⟉": true,
|
||
+ "⫗": true,
|
||
+ "⥻": true,
|
||
+ "⫂": true,
|
||
+ "⫌": true,
|
||
+ "⊋": true,
|
||
+ "⫀": true,
|
||
+ "⊃": true,
|
||
+ "⊇": true,
|
||
+ "⫆": true,
|
||
+ "⊋": true,
|
||
+ "⫌": true,
|
||
+ "⫈": true,
|
||
+ "⫔": true,
|
||
+ "⫖": true,
|
||
+ "⇙": true,
|
||
+ "⤦": true,
|
||
+ "↙": true,
|
||
+ "↙": true,
|
||
+ "⤪": true,
|
||
+ "ß": true,
|
||
+ "ß": true,
|
||
+ "⌖": true,
|
||
+ "τ": true,
|
||
+ "⎴": true,
|
||
+ "ť": true,
|
||
+ "ţ": true,
|
||
+ "т": true,
|
||
+ "⃛": true,
|
||
+ "⌕": true,
|
||
+ "𝔱": true,
|
||
+ "∴": true,
|
||
+ "∴": true,
|
||
+ "θ": true,
|
||
+ "ϑ": true,
|
||
+ "ϑ": true,
|
||
+ "≈": true,
|
||
+ "∼": true,
|
||
+ " ": true,
|
||
+ "≈": true,
|
||
+ "∼": true,
|
||
+ "þ": true,
|
||
+ "þ": true,
|
||
+ "˜": true,
|
||
+ "×": true,
|
||
+ "×": true,
|
||
+ "⊠": true,
|
||
+ "⨱": true,
|
||
+ "⨰": true,
|
||
+ "∭": true,
|
||
+ "⤨": true,
|
||
+ "⊤": true,
|
||
+ "⌶": true,
|
||
+ "⫱": true,
|
||
+ "𝕥": true,
|
||
+ "⫚": true,
|
||
+ "⤩": true,
|
||
+ "‴": true,
|
||
+ "™": true,
|
||
+ "▵": true,
|
||
+ "▿": true,
|
||
+ "◃": true,
|
||
+ "⊴": true,
|
||
+ "≜": true,
|
||
+ "▹": true,
|
||
+ "⊵": true,
|
||
+ "◬": true,
|
||
+ "≜": true,
|
||
+ "⨺": true,
|
||
+ "⨹": true,
|
||
+ "⧍": true,
|
||
+ "⨻": true,
|
||
+ "⏢": true,
|
||
+ "𝓉": true,
|
||
+ "ц": true,
|
||
+ "ћ": true,
|
||
+ "ŧ": true,
|
||
+ "≬": true,
|
||
+ "↞": true,
|
||
+ "↠": true,
|
||
+ "⇑": true,
|
||
+ "⥣": true,
|
||
+ "ú": true,
|
||
+ "ú": true,
|
||
+ "↑": true,
|
||
+ "ў": true,
|
||
+ "ŭ": true,
|
||
+ "û": true,
|
||
+ "û": true,
|
||
+ "у": true,
|
||
+ "⇅": true,
|
||
+ "ű": true,
|
||
+ "⥮": true,
|
||
+ "⥾": true,
|
||
+ "𝔲": true,
|
||
+ "ù": true,
|
||
+ "ù": true,
|
||
+ "↿": true,
|
||
+ "↾": true,
|
||
+ "▀": true,
|
||
+ "⌜": true,
|
||
+ "⌜": true,
|
||
+ "⌏": true,
|
||
+ "◸": true,
|
||
+ "ū": true,
|
||
+ "¨": true,
|
||
+ "¨": true,
|
||
+ "ų": true,
|
||
+ "𝕦": true,
|
||
+ "↑": true,
|
||
+ "↕": true,
|
||
+ "↿": true,
|
||
+ "↾": true,
|
||
+ "⊎": true,
|
||
+ "υ": true,
|
||
+ "ϒ": true,
|
||
+ "υ": true,
|
||
+ "⇈": true,
|
||
+ "⌝": true,
|
||
+ "⌝": true,
|
||
+ "⌎": true,
|
||
+ "ů": true,
|
||
+ "◹": true,
|
||
+ "𝓊": true,
|
||
+ "⋰": true,
|
||
+ "ũ": true,
|
||
+ "▵": true,
|
||
+ "▴": true,
|
||
+ "⇈": true,
|
||
+ "ü": true,
|
||
+ "ü": true,
|
||
+ "⦧": true,
|
||
+ "⇕": true,
|
||
+ "⫨": true,
|
||
+ "⫩": true,
|
||
+ "⊨": true,
|
||
+ "⦜": true,
|
||
+ "ϵ": true,
|
||
+ "ϰ": true,
|
||
+ "∅": true,
|
||
+ "ϕ": true,
|
||
+ "ϖ": true,
|
||
+ "∝": true,
|
||
+ "↕": true,
|
||
+ "ϱ": true,
|
||
+ "ς": true,
|
||
+ "⊊︀": true,
|
||
+ "⫋︀": true,
|
||
+ "⊋︀": true,
|
||
+ "⫌︀": true,
|
||
+ "ϑ": true,
|
||
+ "⊲": true,
|
||
+ "⊳": true,
|
||
+ "в": true,
|
||
+ "⊢": true,
|
||
+ "∨": true,
|
||
+ "⊻": true,
|
||
+ "≚": true,
|
||
+ "⋮": true,
|
||
+ "|": true,
|
||
+ "|": true,
|
||
+ "𝔳": true,
|
||
+ "⊲": true,
|
||
+ "⊂⃒": true,
|
||
+ "⊃⃒": true,
|
||
+ "𝕧": true,
|
||
+ "∝": true,
|
||
+ "⊳": true,
|
||
+ "𝓋": true,
|
||
+ "⫋︀": true,
|
||
+ "⊊︀": true,
|
||
+ "⫌︀": true,
|
||
+ "⊋︀": true,
|
||
+ "⦚": true,
|
||
+ "ŵ": true,
|
||
+ "⩟": true,
|
||
+ "∧": true,
|
||
+ "≙": true,
|
||
+ "℘": true,
|
||
+ "𝔴": true,
|
||
+ "𝕨": true,
|
||
+ "℘": true,
|
||
+ "≀": true,
|
||
+ "≀": true,
|
||
+ "𝓌": true,
|
||
+ "⋂": true,
|
||
+ "◯": true,
|
||
+ "⋃": true,
|
||
+ "▽": true,
|
||
+ "𝔵": true,
|
||
+ "⟺": true,
|
||
+ "⟷": true,
|
||
+ "ξ": true,
|
||
+ "⟸": true,
|
||
+ "⟵": true,
|
||
+ "⟼": true,
|
||
+ "⋻": true,
|
||
+ "⨀": true,
|
||
+ "𝕩": true,
|
||
+ "⨁": true,
|
||
+ "⨂": true,
|
||
+ "⟹": true,
|
||
+ "⟶": true,
|
||
+ "𝓍": true,
|
||
+ "⨆": true,
|
||
+ "⨄": true,
|
||
+ "△": true,
|
||
+ "⋁": true,
|
||
+ "⋀": true,
|
||
+ "ý": true,
|
||
+ "ý": true,
|
||
+ "я": true,
|
||
+ "ŷ": true,
|
||
+ "ы": true,
|
||
+ "¥": true,
|
||
+ "¥": true,
|
||
+ "𝔶": true,
|
||
+ "ї": true,
|
||
+ "𝕪": true,
|
||
+ "𝓎": true,
|
||
+ "ю": true,
|
||
+ "ÿ": true,
|
||
+ "ÿ": true,
|
||
+ "ź": true,
|
||
+ "ž": true,
|
||
+ "з": true,
|
||
+ "ż": true,
|
||
+ "ℨ": true,
|
||
+ "ζ": true,
|
||
+ "𝔷": true,
|
||
+ "ж": true,
|
||
+ "⇝": true,
|
||
+ "𝕫": true,
|
||
+ "𝓏": true,
|
||
+ "‍": true,
|
||
+ "‌": true,
|
||
+}
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go
|
||
new file mode 100644
|
||
index 000000000000..6ab60102c9bf
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/russross/blackfriday/v2/esc.go
|
||
@@ -0,0 +1,70 @@
|
||
+package blackfriday
|
||
+
|
||
+import (
|
||
+ "html"
|
||
+ "io"
|
||
+)
|
||
+
|
||
+var htmlEscaper = [256][]byte{
|
||
+ '&': []byte("&"),
|
||
+ '<': []byte("<"),
|
||
+ '>': []byte(">"),
|
||
+ '"': []byte("""),
|
||
+}
|
||
+
|
||
+func escapeHTML(w io.Writer, s []byte) {
|
||
+ escapeEntities(w, s, false)
|
||
+}
|
||
+
|
||
+func escapeAllHTML(w io.Writer, s []byte) {
|
||
+ escapeEntities(w, s, true)
|
||
+}
|
||
+
|
||
+func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) {
|
||
+ var start, end int
|
||
+ for end < len(s) {
|
||
+ escSeq := htmlEscaper[s[end]]
|
||
+ if escSeq != nil {
|
||
+ isEntity, entityEnd := nodeIsEntity(s, end)
|
||
+ if isEntity && !escapeValidEntities {
|
||
+ w.Write(s[start : entityEnd+1])
|
||
+ start = entityEnd + 1
|
||
+ } else {
|
||
+ w.Write(s[start:end])
|
||
+ w.Write(escSeq)
|
||
+ start = end + 1
|
||
+ }
|
||
+ }
|
||
+ end++
|
||
+ }
|
||
+ if start < len(s) && end <= len(s) {
|
||
+ w.Write(s[start:end])
|
||
+ }
|
||
+}
|
||
+
|
||
+func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) {
|
||
+ isEntity = false
|
||
+ endEntityPos = end + 1
|
||
+
|
||
+ if s[end] == '&' {
|
||
+ for endEntityPos < len(s) {
|
||
+ if s[endEntityPos] == ';' {
|
||
+ if entities[string(s[end:endEntityPos+1])] {
|
||
+ isEntity = true
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+ if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' {
|
||
+ break
|
||
+ }
|
||
+ endEntityPos++
|
||
+ }
|
||
+ }
|
||
+
|
||
+ return isEntity, endEntityPos
|
||
+}
|
||
+
|
||
+func escLink(w io.Writer, text []byte) {
|
||
+ unesc := html.UnescapeString(string(text))
|
||
+ escapeHTML(w, []byte(unesc))
|
||
+}
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go
|
||
new file mode 100644
|
||
index 000000000000..cb4f26e30fd5
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/russross/blackfriday/v2/html.go
|
||
@@ -0,0 +1,952 @@
|
||
+//
|
||
+// Blackfriday Markdown Processor
|
||
+// Available at http://github.com/russross/blackfriday
|
||
+//
|
||
+// Copyright © 2011 Russ Ross <russ@russross.com>.
|
||
+// Distributed under the Simplified BSD License.
|
||
+// See README.md for details.
|
||
+//
|
||
+
|
||
+//
|
||
+//
|
||
+// HTML rendering backend
|
||
+//
|
||
+//
|
||
+
|
||
+package blackfriday
|
||
+
|
||
+import (
|
||
+ "bytes"
|
||
+ "fmt"
|
||
+ "io"
|
||
+ "regexp"
|
||
+ "strings"
|
||
+)
|
||
+
|
||
+// HTMLFlags control optional behavior of HTML renderer.
|
||
+type HTMLFlags int
|
||
+
|
||
+// HTML renderer configuration options.
|
||
+const (
|
||
+ HTMLFlagsNone HTMLFlags = 0
|
||
+ SkipHTML HTMLFlags = 1 << iota // Skip preformatted HTML blocks
|
||
+ SkipImages // Skip embedded images
|
||
+ SkipLinks // Skip all links
|
||
+ Safelink // Only link to trusted protocols
|
||
+ NofollowLinks // Only link with rel="nofollow"
|
||
+ NoreferrerLinks // Only link with rel="noreferrer"
|
||
+ NoopenerLinks // Only link with rel="noopener"
|
||
+ HrefTargetBlank // Add a blank target
|
||
+ CompletePage // Generate a complete HTML page
|
||
+ UseXHTML // Generate XHTML output instead of HTML
|
||
+ FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source
|
||
+ Smartypants // Enable smart punctuation substitutions
|
||
+ SmartypantsFractions // Enable smart fractions (with Smartypants)
|
||
+ SmartypantsDashes // Enable smart dashes (with Smartypants)
|
||
+ SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants)
|
||
+ SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering
|
||
+ SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants)
|
||
+ TOC // Generate a table of contents
|
||
+)
|
||
+
|
||
+var (
|
||
+ htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
|
||
+)
|
||
+
|
||
+const (
|
||
+ htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
|
||
+ processingInstruction + "|" + declaration + "|" + cdata + ")"
|
||
+ closeTag = "</" + tagName + "\\s*[>]"
|
||
+ openTag = "<" + tagName + attribute + "*" + "\\s*/?>"
|
||
+ attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
|
||
+ attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
|
||
+ attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
|
||
+ attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
|
||
+ cdata = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
|
||
+ declaration = "<![A-Z]+" + "\\s+[^>]*>"
|
||
+ doubleQuotedValue = "\"[^\"]*\""
|
||
+ htmlComment = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
|
||
+ processingInstruction = "[<][?].*?[?][>]"
|
||
+ singleQuotedValue = "'[^']*'"
|
||
+ tagName = "[A-Za-z][A-Za-z0-9-]*"
|
||
+ unquotedValue = "[^\"'=<>`\\x00-\\x20]+"
|
||
+)
|
||
+
|
||
+// HTMLRendererParameters is a collection of supplementary parameters tweaking
|
||
+// the behavior of various parts of HTML renderer.
|
||
+type HTMLRendererParameters struct {
|
||
+ // Prepend this text to each relative URL.
|
||
+ AbsolutePrefix string
|
||
+ // Add this text to each footnote anchor, to ensure uniqueness.
|
||
+ FootnoteAnchorPrefix string
|
||
+ // Show this text inside the <a> tag for a footnote return link, if the
|
||
+ // HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
|
||
+ // <sup>[return]</sup> is used.
|
||
+ FootnoteReturnLinkContents string
|
||
+ // If set, add this text to the front of each Heading ID, to ensure
|
||
+ // uniqueness.
|
||
+ HeadingIDPrefix string
|
||
+ // If set, add this text to the back of each Heading ID, to ensure uniqueness.
|
||
+ HeadingIDSuffix string
|
||
+ // Increase heading levels: if the offset is 1, <h1> becomes <h2> etc.
|
||
+ // Negative offset is also valid.
|
||
+ // Resulting levels are clipped between 1 and 6.
|
||
+ HeadingLevelOffset int
|
||
+
|
||
+ Title string // Document title (used if CompletePage is set)
|
||
+ CSS string // Optional CSS file URL (used if CompletePage is set)
|
||
+ Icon string // Optional icon file URL (used if CompletePage is set)
|
||
+
|
||
+ Flags HTMLFlags // Flags allow customizing this renderer's behavior
|
||
+}
|
||
+
|
||
+// HTMLRenderer is a type that implements the Renderer interface for HTML output.
|
||
+//
|
||
+// Do not create this directly, instead use the NewHTMLRenderer function.
|
||
+type HTMLRenderer struct {
|
||
+ HTMLRendererParameters
|
||
+
|
||
+ closeTag string // how to end singleton tags: either " />" or ">"
|
||
+
|
||
+ // Track heading IDs to prevent ID collision in a single generation.
|
||
+ headingIDs map[string]int
|
||
+
|
||
+ lastOutputLen int
|
||
+ disableTags int
|
||
+
|
||
+ sr *SPRenderer
|
||
+}
|
||
+
|
||
+const (
|
||
+ xhtmlClose = " />"
|
||
+ htmlClose = ">"
|
||
+)
|
||
+
|
||
+// NewHTMLRenderer creates and configures an HTMLRenderer object, which
|
||
+// satisfies the Renderer interface.
|
||
+func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer {
|
||
+ // configure the rendering engine
|
||
+ closeTag := htmlClose
|
||
+ if params.Flags&UseXHTML != 0 {
|
||
+ closeTag = xhtmlClose
|
||
+ }
|
||
+
|
||
+ if params.FootnoteReturnLinkContents == "" {
|
||
+ // U+FE0E is VARIATION SELECTOR-15.
|
||
+ // It suppresses automatic emoji presentation of the preceding
|
||
+ // U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS.
|
||
+ params.FootnoteReturnLinkContents = "<span aria-label='Return'>↩\ufe0e</span>"
|
||
+ }
|
||
+
|
||
+ return &HTMLRenderer{
|
||
+ HTMLRendererParameters: params,
|
||
+
|
||
+ closeTag: closeTag,
|
||
+ headingIDs: make(map[string]int),
|
||
+
|
||
+ sr: NewSmartypantsRenderer(params.Flags),
|
||
+ }
|
||
+}
|
||
+
|
||
+func isHTMLTag(tag []byte, tagname string) bool {
|
||
+ found, _ := findHTMLTagPos(tag, tagname)
|
||
+ return found
|
||
+}
|
||
+
|
||
+// Look for a character, but ignore it when it's in any kind of quotes, it
|
||
+// might be JavaScript
|
||
+func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
|
||
+ inSingleQuote := false
|
||
+ inDoubleQuote := false
|
||
+ inGraveQuote := false
|
||
+ i := start
|
||
+ for i < len(html) {
|
||
+ switch {
|
||
+ case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
|
||
+ return i
|
||
+ case html[i] == '\'':
|
||
+ inSingleQuote = !inSingleQuote
|
||
+ case html[i] == '"':
|
||
+ inDoubleQuote = !inDoubleQuote
|
||
+ case html[i] == '`':
|
||
+ inGraveQuote = !inGraveQuote
|
||
+ }
|
||
+ i++
|
||
+ }
|
||
+ return start
|
||
+}
|
||
+
|
||
+func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
|
||
+ i := 0
|
||
+ if i < len(tag) && tag[0] != '<' {
|
||
+ return false, -1
|
||
+ }
|
||
+ i++
|
||
+ i = skipSpace(tag, i)
|
||
+
|
||
+ if i < len(tag) && tag[i] == '/' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ i = skipSpace(tag, i)
|
||
+ j := 0
|
||
+ for ; i < len(tag); i, j = i+1, j+1 {
|
||
+ if j >= len(tagname) {
|
||
+ break
|
||
+ }
|
||
+
|
||
+ if strings.ToLower(string(tag[i]))[0] != tagname[j] {
|
||
+ return false, -1
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if i == len(tag) {
|
||
+ return false, -1
|
||
+ }
|
||
+
|
||
+ rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
|
||
+ if rightAngle >= i {
|
||
+ return true, rightAngle
|
||
+ }
|
||
+
|
||
+ return false, -1
|
||
+}
|
||
+
|
||
+func skipSpace(tag []byte, i int) int {
|
||
+ for i < len(tag) && isspace(tag[i]) {
|
||
+ i++
|
||
+ }
|
||
+ return i
|
||
+}
|
||
+
|
||
+func isRelativeLink(link []byte) (yes bool) {
|
||
+ // a tag begin with '#'
|
||
+ if link[0] == '#' {
|
||
+ return true
|
||
+ }
|
||
+
|
||
+ // link begin with '/' but not '//', the second maybe a protocol relative link
|
||
+ if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
|
||
+ return true
|
||
+ }
|
||
+
|
||
+ // only the root '/'
|
||
+ if len(link) == 1 && link[0] == '/' {
|
||
+ return true
|
||
+ }
|
||
+
|
||
+ // current directory : begin with "./"
|
||
+ if bytes.HasPrefix(link, []byte("./")) {
|
||
+ return true
|
||
+ }
|
||
+
|
||
+ // parent directory : begin with "../"
|
||
+ if bytes.HasPrefix(link, []byte("../")) {
|
||
+ return true
|
||
+ }
|
||
+
|
||
+ return false
|
||
+}
|
||
+
|
||
+func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
|
||
+ for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
|
||
+ tmp := fmt.Sprintf("%s-%d", id, count+1)
|
||
+
|
||
+ if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
|
||
+ r.headingIDs[id] = count + 1
|
||
+ id = tmp
|
||
+ } else {
|
||
+ id = id + "-1"
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if _, found := r.headingIDs[id]; !found {
|
||
+ r.headingIDs[id] = 0
|
||
+ }
|
||
+
|
||
+ return id
|
||
+}
|
||
+
|
||
+func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
|
||
+ if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
|
||
+ newDest := r.AbsolutePrefix
|
||
+ if link[0] != '/' {
|
||
+ newDest += "/"
|
||
+ }
|
||
+ newDest += string(link)
|
||
+ return []byte(newDest)
|
||
+ }
|
||
+ return link
|
||
+}
|
||
+
|
||
+func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
|
||
+ if isRelativeLink(link) {
|
||
+ return attrs
|
||
+ }
|
||
+ val := []string{}
|
||
+ if flags&NofollowLinks != 0 {
|
||
+ val = append(val, "nofollow")
|
||
+ }
|
||
+ if flags&NoreferrerLinks != 0 {
|
||
+ val = append(val, "noreferrer")
|
||
+ }
|
||
+ if flags&NoopenerLinks != 0 {
|
||
+ val = append(val, "noopener")
|
||
+ }
|
||
+ if flags&HrefTargetBlank != 0 {
|
||
+ attrs = append(attrs, "target=\"_blank\"")
|
||
+ }
|
||
+ if len(val) == 0 {
|
||
+ return attrs
|
||
+ }
|
||
+ attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
|
||
+ return append(attrs, attr)
|
||
+}
|
||
+
|
||
+func isMailto(link []byte) bool {
|
||
+ return bytes.HasPrefix(link, []byte("mailto:"))
|
||
+}
|
||
+
|
||
+func needSkipLink(flags HTMLFlags, dest []byte) bool {
|
||
+ if flags&SkipLinks != 0 {
|
||
+ return true
|
||
+ }
|
||
+ return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
|
||
+}
|
||
+
|
||
+func isSmartypantable(node *Node) bool {
|
||
+ pt := node.Parent.Type
|
||
+ return pt != Link && pt != CodeBlock && pt != Code
|
||
+}
|
||
+
|
||
+func appendLanguageAttr(attrs []string, info []byte) []string {
|
||
+ if len(info) == 0 {
|
||
+ return attrs
|
||
+ }
|
||
+ endOfLang := bytes.IndexAny(info, "\t ")
|
||
+ if endOfLang < 0 {
|
||
+ endOfLang = len(info)
|
||
+ }
|
||
+ return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
|
||
+}
|
||
+
|
||
+func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
|
||
+ w.Write(name)
|
||
+ if len(attrs) > 0 {
|
||
+ w.Write(spaceBytes)
|
||
+ w.Write([]byte(strings.Join(attrs, " ")))
|
||
+ }
|
||
+ w.Write(gtBytes)
|
||
+ r.lastOutputLen = 1
|
||
+}
|
||
+
|
||
+func footnoteRef(prefix string, node *Node) []byte {
|
||
+ urlFrag := prefix + string(slugify(node.Destination))
|
||
+ anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
|
||
+ return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
|
||
+}
|
||
+
|
||
+func footnoteItem(prefix string, slug []byte) []byte {
|
||
+ return []byte(fmt.Sprintf(`<li id="fn:%s%s">`, prefix, slug))
|
||
+}
|
||
+
|
||
+func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
|
||
+ const format = ` <a class="footnote-return" href="#fnref:%s%s">%s</a>`
|
||
+ return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
|
||
+}
|
||
+
|
||
+func itemOpenCR(node *Node) bool {
|
||
+ if node.Prev == nil {
|
||
+ return false
|
||
+ }
|
||
+ ld := node.Parent.ListData
|
||
+ return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
|
||
+}
|
||
+
|
||
+func skipParagraphTags(node *Node) bool {
|
||
+ grandparent := node.Parent.Parent
|
||
+ if grandparent == nil || grandparent.Type != List {
|
||
+ return false
|
||
+ }
|
||
+ tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
|
||
+ return grandparent.Type == List && tightOrTerm
|
||
+}
|
||
+
|
||
+func cellAlignment(align CellAlignFlags) string {
|
||
+ switch align {
|
||
+ case TableAlignmentLeft:
|
||
+ return "left"
|
||
+ case TableAlignmentRight:
|
||
+ return "right"
|
||
+ case TableAlignmentCenter:
|
||
+ return "center"
|
||
+ default:
|
||
+ return ""
|
||
+ }
|
||
+}
|
||
+
|
||
+func (r *HTMLRenderer) out(w io.Writer, text []byte) {
|
||
+ if r.disableTags > 0 {
|
||
+ w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
|
||
+ } else {
|
||
+ w.Write(text)
|
||
+ }
|
||
+ r.lastOutputLen = len(text)
|
||
+}
|
||
+
|
||
+func (r *HTMLRenderer) cr(w io.Writer) {
|
||
+ if r.lastOutputLen > 0 {
|
||
+ r.out(w, nlBytes)
|
||
+ }
|
||
+}
|
||
+
|
||
+var (
|
||
+ nlBytes = []byte{'\n'}
|
||
+ gtBytes = []byte{'>'}
|
||
+ spaceBytes = []byte{' '}
|
||
+)
|
||
+
|
||
+var (
|
||
+ brTag = []byte("<br>")
|
||
+ brXHTMLTag = []byte("<br />")
|
||
+ emTag = []byte("<em>")
|
||
+ emCloseTag = []byte("</em>")
|
||
+ strongTag = []byte("<strong>")
|
||
+ strongCloseTag = []byte("</strong>")
|
||
+ delTag = []byte("<del>")
|
||
+ delCloseTag = []byte("</del>")
|
||
+ ttTag = []byte("<tt>")
|
||
+ ttCloseTag = []byte("</tt>")
|
||
+ aTag = []byte("<a")
|
||
+ aCloseTag = []byte("</a>")
|
||
+ preTag = []byte("<pre>")
|
||
+ preCloseTag = []byte("</pre>")
|
||
+ codeTag = []byte("<code>")
|
||
+ codeCloseTag = []byte("</code>")
|
||
+ pTag = []byte("<p>")
|
||
+ pCloseTag = []byte("</p>")
|
||
+ blockquoteTag = []byte("<blockquote>")
|
||
+ blockquoteCloseTag = []byte("</blockquote>")
|
||
+ hrTag = []byte("<hr>")
|
||
+ hrXHTMLTag = []byte("<hr />")
|
||
+ ulTag = []byte("<ul>")
|
||
+ ulCloseTag = []byte("</ul>")
|
||
+ olTag = []byte("<ol>")
|
||
+ olCloseTag = []byte("</ol>")
|
||
+ dlTag = []byte("<dl>")
|
||
+ dlCloseTag = []byte("</dl>")
|
||
+ liTag = []byte("<li>")
|
||
+ liCloseTag = []byte("</li>")
|
||
+ ddTag = []byte("<dd>")
|
||
+ ddCloseTag = []byte("</dd>")
|
||
+ dtTag = []byte("<dt>")
|
||
+ dtCloseTag = []byte("</dt>")
|
||
+ tableTag = []byte("<table>")
|
||
+ tableCloseTag = []byte("</table>")
|
||
+ tdTag = []byte("<td")
|
||
+ tdCloseTag = []byte("</td>")
|
||
+ thTag = []byte("<th")
|
||
+ thCloseTag = []byte("</th>")
|
||
+ theadTag = []byte("<thead>")
|
||
+ theadCloseTag = []byte("</thead>")
|
||
+ tbodyTag = []byte("<tbody>")
|
||
+ tbodyCloseTag = []byte("</tbody>")
|
||
+ trTag = []byte("<tr>")
|
||
+ trCloseTag = []byte("</tr>")
|
||
+ h1Tag = []byte("<h1")
|
||
+ h1CloseTag = []byte("</h1>")
|
||
+ h2Tag = []byte("<h2")
|
||
+ h2CloseTag = []byte("</h2>")
|
||
+ h3Tag = []byte("<h3")
|
||
+ h3CloseTag = []byte("</h3>")
|
||
+ h4Tag = []byte("<h4")
|
||
+ h4CloseTag = []byte("</h4>")
|
||
+ h5Tag = []byte("<h5")
|
||
+ h5CloseTag = []byte("</h5>")
|
||
+ h6Tag = []byte("<h6")
|
||
+ h6CloseTag = []byte("</h6>")
|
||
+
|
||
+ footnotesDivBytes = []byte("\n<div class=\"footnotes\">\n\n")
|
||
+ footnotesCloseDivBytes = []byte("\n</div>\n")
|
||
+)
|
||
+
|
||
+func headingTagsFromLevel(level int) ([]byte, []byte) {
|
||
+ if level <= 1 {
|
||
+ return h1Tag, h1CloseTag
|
||
+ }
|
||
+ switch level {
|
||
+ case 2:
|
||
+ return h2Tag, h2CloseTag
|
||
+ case 3:
|
||
+ return h3Tag, h3CloseTag
|
||
+ case 4:
|
||
+ return h4Tag, h4CloseTag
|
||
+ case 5:
|
||
+ return h5Tag, h5CloseTag
|
||
+ }
|
||
+ return h6Tag, h6CloseTag
|
||
+}
|
||
+
|
||
+func (r *HTMLRenderer) outHRTag(w io.Writer) {
|
||
+ if r.Flags&UseXHTML == 0 {
|
||
+ r.out(w, hrTag)
|
||
+ } else {
|
||
+ r.out(w, hrXHTMLTag)
|
||
+ }
|
||
+}
|
||
+
|
||
+// RenderNode is a default renderer of a single node of a syntax tree. For
|
||
+// block nodes it will be called twice: first time with entering=true, second
|
||
+// time with entering=false, so that it could know when it's working on an open
|
||
+// tag and when on close. It writes the result to w.
|
||
+//
|
||
+// The return value is a way to tell the calling walker to adjust its walk
|
||
+// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
|
||
+// can ask the walker to skip a subtree of this node by returning SkipChildren.
|
||
+// The typical behavior is to return GoToNext, which asks for the usual
|
||
+// traversal to the next node.
|
||
+func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
|
||
+ attrs := []string{}
|
||
+ switch node.Type {
|
||
+ case Text:
|
||
+ if r.Flags&Smartypants != 0 {
|
||
+ var tmp bytes.Buffer
|
||
+ escapeHTML(&tmp, node.Literal)
|
||
+ r.sr.Process(w, tmp.Bytes())
|
||
+ } else {
|
||
+ if node.Parent.Type == Link {
|
||
+ escLink(w, node.Literal)
|
||
+ } else {
|
||
+ escapeHTML(w, node.Literal)
|
||
+ }
|
||
+ }
|
||
+ case Softbreak:
|
||
+ r.cr(w)
|
||
+ // TODO: make it configurable via out(renderer.softbreak)
|
||
+ case Hardbreak:
|
||
+ if r.Flags&UseXHTML == 0 {
|
||
+ r.out(w, brTag)
|
||
+ } else {
|
||
+ r.out(w, brXHTMLTag)
|
||
+ }
|
||
+ r.cr(w)
|
||
+ case Emph:
|
||
+ if entering {
|
||
+ r.out(w, emTag)
|
||
+ } else {
|
||
+ r.out(w, emCloseTag)
|
||
+ }
|
||
+ case Strong:
|
||
+ if entering {
|
||
+ r.out(w, strongTag)
|
||
+ } else {
|
||
+ r.out(w, strongCloseTag)
|
||
+ }
|
||
+ case Del:
|
||
+ if entering {
|
||
+ r.out(w, delTag)
|
||
+ } else {
|
||
+ r.out(w, delCloseTag)
|
||
+ }
|
||
+ case HTMLSpan:
|
||
+ if r.Flags&SkipHTML != 0 {
|
||
+ break
|
||
+ }
|
||
+ r.out(w, node.Literal)
|
||
+ case Link:
|
||
+ // mark it but don't link it if it is not a safe link: no smartypants
|
||
+ dest := node.LinkData.Destination
|
||
+ if needSkipLink(r.Flags, dest) {
|
||
+ if entering {
|
||
+ r.out(w, ttTag)
|
||
+ } else {
|
||
+ r.out(w, ttCloseTag)
|
||
+ }
|
||
+ } else {
|
||
+ if entering {
|
||
+ dest = r.addAbsPrefix(dest)
|
||
+ var hrefBuf bytes.Buffer
|
||
+ hrefBuf.WriteString("href=\"")
|
||
+ escLink(&hrefBuf, dest)
|
||
+ hrefBuf.WriteByte('"')
|
||
+ attrs = append(attrs, hrefBuf.String())
|
||
+ if node.NoteID != 0 {
|
||
+ r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
|
||
+ break
|
||
+ }
|
||
+ attrs = appendLinkAttrs(attrs, r.Flags, dest)
|
||
+ if len(node.LinkData.Title) > 0 {
|
||
+ var titleBuff bytes.Buffer
|
||
+ titleBuff.WriteString("title=\"")
|
||
+ escapeHTML(&titleBuff, node.LinkData.Title)
|
||
+ titleBuff.WriteByte('"')
|
||
+ attrs = append(attrs, titleBuff.String())
|
||
+ }
|
||
+ r.tag(w, aTag, attrs)
|
||
+ } else {
|
||
+ if node.NoteID != 0 {
|
||
+ break
|
||
+ }
|
||
+ r.out(w, aCloseTag)
|
||
+ }
|
||
+ }
|
||
+ case Image:
|
||
+ if r.Flags&SkipImages != 0 {
|
||
+ return SkipChildren
|
||
+ }
|
||
+ if entering {
|
||
+ dest := node.LinkData.Destination
|
||
+ dest = r.addAbsPrefix(dest)
|
||
+ if r.disableTags == 0 {
|
||
+ //if options.safe && potentiallyUnsafe(dest) {
|
||
+ //out(w, `<img src="" alt="`)
|
||
+ //} else {
|
||
+ r.out(w, []byte(`<img src="`))
|
||
+ escLink(w, dest)
|
||
+ r.out(w, []byte(`" alt="`))
|
||
+ //}
|
||
+ }
|
||
+ r.disableTags++
|
||
+ } else {
|
||
+ r.disableTags--
|
||
+ if r.disableTags == 0 {
|
||
+ if node.LinkData.Title != nil {
|
||
+ r.out(w, []byte(`" title="`))
|
||
+ escapeHTML(w, node.LinkData.Title)
|
||
+ }
|
||
+ r.out(w, []byte(`" />`))
|
||
+ }
|
||
+ }
|
||
+ case Code:
|
||
+ r.out(w, codeTag)
|
||
+ escapeAllHTML(w, node.Literal)
|
||
+ r.out(w, codeCloseTag)
|
||
+ case Document:
|
||
+ break
|
||
+ case Paragraph:
|
||
+ if skipParagraphTags(node) {
|
||
+ break
|
||
+ }
|
||
+ if entering {
|
||
+ // TODO: untangle this clusterfuck about when the newlines need
|
||
+ // to be added and when not.
|
||
+ if node.Prev != nil {
|
||
+ switch node.Prev.Type {
|
||
+ case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule:
|
||
+ r.cr(w)
|
||
+ }
|
||
+ }
|
||
+ if node.Parent.Type == BlockQuote && node.Prev == nil {
|
||
+ r.cr(w)
|
||
+ }
|
||
+ r.out(w, pTag)
|
||
+ } else {
|
||
+ r.out(w, pCloseTag)
|
||
+ if !(node.Parent.Type == Item && node.Next == nil) {
|
||
+ r.cr(w)
|
||
+ }
|
||
+ }
|
||
+ case BlockQuote:
|
||
+ if entering {
|
||
+ r.cr(w)
|
||
+ r.out(w, blockquoteTag)
|
||
+ } else {
|
||
+ r.out(w, blockquoteCloseTag)
|
||
+ r.cr(w)
|
||
+ }
|
||
+ case HTMLBlock:
|
||
+ if r.Flags&SkipHTML != 0 {
|
||
+ break
|
||
+ }
|
||
+ r.cr(w)
|
||
+ r.out(w, node.Literal)
|
||
+ r.cr(w)
|
||
+ case Heading:
|
||
+ headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level
|
||
+ openTag, closeTag := headingTagsFromLevel(headingLevel)
|
||
+ if entering {
|
||
+ if node.IsTitleblock {
|
||
+ attrs = append(attrs, `class="title"`)
|
||
+ }
|
||
+ if node.HeadingID != "" {
|
||
+ id := r.ensureUniqueHeadingID(node.HeadingID)
|
||
+ if r.HeadingIDPrefix != "" {
|
||
+ id = r.HeadingIDPrefix + id
|
||
+ }
|
||
+ if r.HeadingIDSuffix != "" {
|
||
+ id = id + r.HeadingIDSuffix
|
||
+ }
|
||
+ attrs = append(attrs, fmt.Sprintf(`id="%s"`, id))
|
||
+ }
|
||
+ r.cr(w)
|
||
+ r.tag(w, openTag, attrs)
|
||
+ } else {
|
||
+ r.out(w, closeTag)
|
||
+ if !(node.Parent.Type == Item && node.Next == nil) {
|
||
+ r.cr(w)
|
||
+ }
|
||
+ }
|
||
+ case HorizontalRule:
|
||
+ r.cr(w)
|
||
+ r.outHRTag(w)
|
||
+ r.cr(w)
|
||
+ case List:
|
||
+ openTag := ulTag
|
||
+ closeTag := ulCloseTag
|
||
+ if node.ListFlags&ListTypeOrdered != 0 {
|
||
+ openTag = olTag
|
||
+ closeTag = olCloseTag
|
||
+ }
|
||
+ if node.ListFlags&ListTypeDefinition != 0 {
|
||
+ openTag = dlTag
|
||
+ closeTag = dlCloseTag
|
||
+ }
|
||
+ if entering {
|
||
+ if node.IsFootnotesList {
|
||
+ r.out(w, footnotesDivBytes)
|
||
+ r.outHRTag(w)
|
||
+ r.cr(w)
|
||
+ }
|
||
+ r.cr(w)
|
||
+ if node.Parent.Type == Item && node.Parent.Parent.Tight {
|
||
+ r.cr(w)
|
||
+ }
|
||
+ r.tag(w, openTag[:len(openTag)-1], attrs)
|
||
+ r.cr(w)
|
||
+ } else {
|
||
+ r.out(w, closeTag)
|
||
+ //cr(w)
|
||
+ //if node.parent.Type != Item {
|
||
+ // cr(w)
|
||
+ //}
|
||
+ if node.Parent.Type == Item && node.Next != nil {
|
||
+ r.cr(w)
|
||
+ }
|
||
+ if node.Parent.Type == Document || node.Parent.Type == BlockQuote {
|
||
+ r.cr(w)
|
||
+ }
|
||
+ if node.IsFootnotesList {
|
||
+ r.out(w, footnotesCloseDivBytes)
|
||
+ }
|
||
+ }
|
||
+ case Item:
|
||
+ openTag := liTag
|
||
+ closeTag := liCloseTag
|
||
+ if node.ListFlags&ListTypeDefinition != 0 {
|
||
+ openTag = ddTag
|
||
+ closeTag = ddCloseTag
|
||
+ }
|
||
+ if node.ListFlags&ListTypeTerm != 0 {
|
||
+ openTag = dtTag
|
||
+ closeTag = dtCloseTag
|
||
+ }
|
||
+ if entering {
|
||
+ if itemOpenCR(node) {
|
||
+ r.cr(w)
|
||
+ }
|
||
+ if node.ListData.RefLink != nil {
|
||
+ slug := slugify(node.ListData.RefLink)
|
||
+ r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug))
|
||
+ break
|
||
+ }
|
||
+ r.out(w, openTag)
|
||
+ } else {
|
||
+ if node.ListData.RefLink != nil {
|
||
+ slug := slugify(node.ListData.RefLink)
|
||
+ if r.Flags&FootnoteReturnLinks != 0 {
|
||
+ r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug))
|
||
+ }
|
||
+ }
|
||
+ r.out(w, closeTag)
|
||
+ r.cr(w)
|
||
+ }
|
||
+ case CodeBlock:
|
||
+ attrs = appendLanguageAttr(attrs, node.Info)
|
||
+ r.cr(w)
|
||
+ r.out(w, preTag)
|
||
+ r.tag(w, codeTag[:len(codeTag)-1], attrs)
|
||
+ escapeAllHTML(w, node.Literal)
|
||
+ r.out(w, codeCloseTag)
|
||
+ r.out(w, preCloseTag)
|
||
+ if node.Parent.Type != Item {
|
||
+ r.cr(w)
|
||
+ }
|
||
+ case Table:
|
||
+ if entering {
|
||
+ r.cr(w)
|
||
+ r.out(w, tableTag)
|
||
+ } else {
|
||
+ r.out(w, tableCloseTag)
|
||
+ r.cr(w)
|
||
+ }
|
||
+ case TableCell:
|
||
+ openTag := tdTag
|
||
+ closeTag := tdCloseTag
|
||
+ if node.IsHeader {
|
||
+ openTag = thTag
|
||
+ closeTag = thCloseTag
|
||
+ }
|
||
+ if entering {
|
||
+ align := cellAlignment(node.Align)
|
||
+ if align != "" {
|
||
+ attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
|
||
+ }
|
||
+ if node.Prev == nil {
|
||
+ r.cr(w)
|
||
+ }
|
||
+ r.tag(w, openTag, attrs)
|
||
+ } else {
|
||
+ r.out(w, closeTag)
|
||
+ r.cr(w)
|
||
+ }
|
||
+ case TableHead:
|
||
+ if entering {
|
||
+ r.cr(w)
|
||
+ r.out(w, theadTag)
|
||
+ } else {
|
||
+ r.out(w, theadCloseTag)
|
||
+ r.cr(w)
|
||
+ }
|
||
+ case TableBody:
|
||
+ if entering {
|
||
+ r.cr(w)
|
||
+ r.out(w, tbodyTag)
|
||
+ // XXX: this is to adhere to a rather silly test. Should fix test.
|
||
+ if node.FirstChild == nil {
|
||
+ r.cr(w)
|
||
+ }
|
||
+ } else {
|
||
+ r.out(w, tbodyCloseTag)
|
||
+ r.cr(w)
|
||
+ }
|
||
+ case TableRow:
|
||
+ if entering {
|
||
+ r.cr(w)
|
||
+ r.out(w, trTag)
|
||
+ } else {
|
||
+ r.out(w, trCloseTag)
|
||
+ r.cr(w)
|
||
+ }
|
||
+ default:
|
||
+ panic("Unknown node type " + node.Type.String())
|
||
+ }
|
||
+ return GoToNext
|
||
+}
|
||
+
|
||
+// RenderHeader writes HTML document preamble and TOC if requested.
|
||
+func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
|
||
+ r.writeDocumentHeader(w)
|
||
+ if r.Flags&TOC != 0 {
|
||
+ r.writeTOC(w, ast)
|
||
+ }
|
||
+}
|
||
+
|
||
+// RenderFooter writes HTML document footer.
|
||
+func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
|
||
+ if r.Flags&CompletePage == 0 {
|
||
+ return
|
||
+ }
|
||
+ io.WriteString(w, "\n</body>\n</html>\n")
|
||
+}
|
||
+
|
||
+func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) {
|
||
+ if r.Flags&CompletePage == 0 {
|
||
+ return
|
||
+ }
|
||
+ ending := ""
|
||
+ if r.Flags&UseXHTML != 0 {
|
||
+ io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
|
||
+ io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
|
||
+ io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
|
||
+ ending = " /"
|
||
+ } else {
|
||
+ io.WriteString(w, "<!DOCTYPE html>\n")
|
||
+ io.WriteString(w, "<html>\n")
|
||
+ }
|
||
+ io.WriteString(w, "<head>\n")
|
||
+ io.WriteString(w, " <title>")
|
||
+ if r.Flags&Smartypants != 0 {
|
||
+ r.sr.Process(w, []byte(r.Title))
|
||
+ } else {
|
||
+ escapeHTML(w, []byte(r.Title))
|
||
+ }
|
||
+ io.WriteString(w, "</title>\n")
|
||
+ io.WriteString(w, " <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
|
||
+ io.WriteString(w, Version)
|
||
+ io.WriteString(w, "\"")
|
||
+ io.WriteString(w, ending)
|
||
+ io.WriteString(w, ">\n")
|
||
+ io.WriteString(w, " <meta charset=\"utf-8\"")
|
||
+ io.WriteString(w, ending)
|
||
+ io.WriteString(w, ">\n")
|
||
+ if r.CSS != "" {
|
||
+ io.WriteString(w, " <link rel=\"stylesheet\" type=\"text/css\" href=\"")
|
||
+ escapeHTML(w, []byte(r.CSS))
|
||
+ io.WriteString(w, "\"")
|
||
+ io.WriteString(w, ending)
|
||
+ io.WriteString(w, ">\n")
|
||
+ }
|
||
+ if r.Icon != "" {
|
||
+ io.WriteString(w, " <link rel=\"icon\" type=\"image/x-icon\" href=\"")
|
||
+ escapeHTML(w, []byte(r.Icon))
|
||
+ io.WriteString(w, "\"")
|
||
+ io.WriteString(w, ending)
|
||
+ io.WriteString(w, ">\n")
|
||
+ }
|
||
+ io.WriteString(w, "</head>\n")
|
||
+ io.WriteString(w, "<body>\n\n")
|
||
+}
|
||
+
|
||
+func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) {
|
||
+ buf := bytes.Buffer{}
|
||
+
|
||
+ inHeading := false
|
||
+ tocLevel := 0
|
||
+ headingCount := 0
|
||
+
|
||
+ ast.Walk(func(node *Node, entering bool) WalkStatus {
|
||
+ if node.Type == Heading && !node.HeadingData.IsTitleblock {
|
||
+ inHeading = entering
|
||
+ if entering {
|
||
+ node.HeadingID = fmt.Sprintf("toc_%d", headingCount)
|
||
+ if node.Level == tocLevel {
|
||
+ buf.WriteString("</li>\n\n<li>")
|
||
+ } else if node.Level < tocLevel {
|
||
+ for node.Level < tocLevel {
|
||
+ tocLevel--
|
||
+ buf.WriteString("</li>\n</ul>")
|
||
+ }
|
||
+ buf.WriteString("</li>\n\n<li>")
|
||
+ } else {
|
||
+ for node.Level > tocLevel {
|
||
+ tocLevel++
|
||
+ buf.WriteString("\n<ul>\n<li>")
|
||
+ }
|
||
+ }
|
||
+
|
||
+ fmt.Fprintf(&buf, `<a href="#toc_%d">`, headingCount)
|
||
+ headingCount++
|
||
+ } else {
|
||
+ buf.WriteString("</a>")
|
||
+ }
|
||
+ return GoToNext
|
||
+ }
|
||
+
|
||
+ if inHeading {
|
||
+ return r.RenderNode(&buf, node, entering)
|
||
+ }
|
||
+
|
||
+ return GoToNext
|
||
+ })
|
||
+
|
||
+ for ; tocLevel > 0; tocLevel-- {
|
||
+ buf.WriteString("</li>\n</ul>")
|
||
+ }
|
||
+
|
||
+ if buf.Len() > 0 {
|
||
+ io.WriteString(w, "<nav>\n")
|
||
+ w.Write(buf.Bytes())
|
||
+ io.WriteString(w, "\n\n</nav>\n")
|
||
+ }
|
||
+ r.lastOutputLen = buf.Len()
|
||
+}
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go
|
||
new file mode 100644
|
||
index 000000000000..d45bd941726e
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/russross/blackfriday/v2/inline.go
|
||
@@ -0,0 +1,1228 @@
|
||
+//
|
||
+// Blackfriday Markdown Processor
|
||
+// Available at http://github.com/russross/blackfriday
|
||
+//
|
||
+// Copyright © 2011 Russ Ross <russ@russross.com>.
|
||
+// Distributed under the Simplified BSD License.
|
||
+// See README.md for details.
|
||
+//
|
||
+
|
||
+//
|
||
+// Functions to parse inline elements.
|
||
+//
|
||
+
|
||
+package blackfriday
|
||
+
|
||
+import (
|
||
+ "bytes"
|
||
+ "regexp"
|
||
+ "strconv"
|
||
+)
|
||
+
|
||
+var (
|
||
+ urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
|
||
+ anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)
|
||
+
|
||
+ // https://www.w3.org/TR/html5/syntax.html#character-references
|
||
+ // highest unicode code point in 17 planes (2^20): 1,114,112d =
|
||
+ // 7 dec digits or 6 hex digits
|
||
+ // named entity references can be 2-31 characters with stuff like <
|
||
+ // at one end and ∳ at the other. There
|
||
+ // are also sometimes numbers at the end, although this isn't inherent
|
||
+ // in the specification; there are never numbers anywhere else in
|
||
+ // current character references, though; see ¾ and ▒, etc.
|
||
+ // https://www.w3.org/TR/html5/syntax.html#named-character-references
|
||
+ //
|
||
+ // entity := "&" (named group | number ref) ";"
|
||
+ // named group := [a-zA-Z]{2,31}[0-9]{0,2}
|
||
+ // number ref := "#" (dec ref | hex ref)
|
||
+ // dec ref := [0-9]{1,7}
|
||
+ // hex ref := ("x" | "X") [0-9a-fA-F]{1,6}
|
||
+ htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`)
|
||
+)
|
||
+
|
||
+// Functions to parse text within a block
|
||
+// Each function returns the number of chars taken care of
|
||
+// data is the complete block being rendered
|
||
+// offset is the number of valid chars before the current cursor
|
||
+
|
||
+func (p *Markdown) inline(currBlock *Node, data []byte) {
|
||
+ // handlers might call us recursively: enforce a maximum depth
|
||
+ if p.nesting >= p.maxNesting || len(data) == 0 {
|
||
+ return
|
||
+ }
|
||
+ p.nesting++
|
||
+ beg, end := 0, 0
|
||
+ for end < len(data) {
|
||
+ handler := p.inlineCallback[data[end]]
|
||
+ if handler != nil {
|
||
+ if consumed, node := handler(p, data, end); consumed == 0 {
|
||
+ // No action from the callback.
|
||
+ end++
|
||
+ } else {
|
||
+ // Copy inactive chars into the output.
|
||
+ currBlock.AppendChild(text(data[beg:end]))
|
||
+ if node != nil {
|
||
+ currBlock.AppendChild(node)
|
||
+ }
|
||
+ // Skip past whatever the callback used.
|
||
+ beg = end + consumed
|
||
+ end = beg
|
||
+ }
|
||
+ } else {
|
||
+ end++
|
||
+ }
|
||
+ }
|
||
+ if beg < len(data) {
|
||
+ if data[end-1] == '\n' {
|
||
+ end--
|
||
+ }
|
||
+ currBlock.AppendChild(text(data[beg:end]))
|
||
+ }
|
||
+ p.nesting--
|
||
+}
|
||
+
|
||
+// single and double emphasis parsing
|
||
+func emphasis(p *Markdown, data []byte, offset int) (int, *Node) {
|
||
+ data = data[offset:]
|
||
+ c := data[0]
|
||
+
|
||
+ if len(data) > 2 && data[1] != c {
|
||
+ // whitespace cannot follow an opening emphasis;
|
||
+ // strikethrough only takes two characters '~~'
|
||
+ if c == '~' || isspace(data[1]) {
|
||
+ return 0, nil
|
||
+ }
|
||
+ ret, node := helperEmphasis(p, data[1:], c)
|
||
+ if ret == 0 {
|
||
+ return 0, nil
|
||
+ }
|
||
+
|
||
+ return ret + 1, node
|
||
+ }
|
||
+
|
||
+ if len(data) > 3 && data[1] == c && data[2] != c {
|
||
+ if isspace(data[2]) {
|
||
+ return 0, nil
|
||
+ }
|
||
+ ret, node := helperDoubleEmphasis(p, data[2:], c)
|
||
+ if ret == 0 {
|
||
+ return 0, nil
|
||
+ }
|
||
+
|
||
+ return ret + 2, node
|
||
+ }
|
||
+
|
||
+ if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c {
|
||
+ if c == '~' || isspace(data[3]) {
|
||
+ return 0, nil
|
||
+ }
|
||
+ ret, node := helperTripleEmphasis(p, data, 3, c)
|
||
+ if ret == 0 {
|
||
+ return 0, nil
|
||
+ }
|
||
+
|
||
+ return ret + 3, node
|
||
+ }
|
||
+
|
||
+ return 0, nil
|
||
+}
|
||
+
|
||
+func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) {
|
||
+ data = data[offset:]
|
||
+
|
||
+ nb := 0
|
||
+
|
||
+ // count the number of backticks in the delimiter
|
||
+ for nb < len(data) && data[nb] == '`' {
|
||
+ nb++
|
||
+ }
|
||
+
|
||
+ // find the next delimiter
|
||
+ i, end := 0, 0
|
||
+ for end = nb; end < len(data) && i < nb; end++ {
|
||
+ if data[end] == '`' {
|
||
+ i++
|
||
+ } else {
|
||
+ i = 0
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // no matching delimiter?
|
||
+ if i < nb && end >= len(data) {
|
||
+ return 0, nil
|
||
+ }
|
||
+
|
||
+ // trim outside whitespace
|
||
+ fBegin := nb
|
||
+ for fBegin < end && data[fBegin] == ' ' {
|
||
+ fBegin++
|
||
+ }
|
||
+
|
||
+ fEnd := end - nb
|
||
+ for fEnd > fBegin && data[fEnd-1] == ' ' {
|
||
+ fEnd--
|
||
+ }
|
||
+
|
||
+ // render the code span
|
||
+ if fBegin != fEnd {
|
||
+ code := NewNode(Code)
|
||
+ code.Literal = data[fBegin:fEnd]
|
||
+ return end, code
|
||
+ }
|
||
+
|
||
+ return end, nil
|
||
+}
|
||
+
|
||
+// newline preceded by two spaces becomes <br>
|
||
+func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) {
|
||
+ origOffset := offset
|
||
+ for offset < len(data) && data[offset] == ' ' {
|
||
+ offset++
|
||
+ }
|
||
+
|
||
+ if offset < len(data) && data[offset] == '\n' {
|
||
+ if offset-origOffset >= 2 {
|
||
+ return offset - origOffset + 1, NewNode(Hardbreak)
|
||
+ }
|
||
+ return offset - origOffset, nil
|
||
+ }
|
||
+ return 0, nil
|
||
+}
|
||
+
|
||
+// newline without two spaces works when HardLineBreak is enabled
|
||
+func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) {
|
||
+ if p.extensions&HardLineBreak != 0 {
|
||
+ return 1, NewNode(Hardbreak)
|
||
+ }
|
||
+ return 0, nil
|
||
+}
|
||
+
|
||
+type linkType int
|
||
+
|
||
+const (
|
||
+ linkNormal linkType = iota
|
||
+ linkImg
|
||
+ linkDeferredFootnote
|
||
+ linkInlineFootnote
|
||
+)
|
||
+
|
||
+func isReferenceStyleLink(data []byte, pos int, t linkType) bool {
|
||
+ if t == linkDeferredFootnote {
|
||
+ return false
|
||
+ }
|
||
+ return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^'
|
||
+}
|
||
+
|
||
+func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) {
|
||
+ if offset < len(data)-1 && data[offset+1] == '[' {
|
||
+ return link(p, data, offset)
|
||
+ }
|
||
+ return 0, nil
|
||
+}
|
||
+
|
||
+func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) {
|
||
+ if offset < len(data)-1 && data[offset+1] == '[' {
|
||
+ return link(p, data, offset)
|
||
+ }
|
||
+ return 0, nil
|
||
+}
|
||
+
|
||
+// '[': parse a link or an image or a footnote
|
||
+func link(p *Markdown, data []byte, offset int) (int, *Node) {
|
||
+ // no links allowed inside regular links, footnote, and deferred footnotes
|
||
+ if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') {
|
||
+ return 0, nil
|
||
+ }
|
||
+
|
||
+ var t linkType
|
||
+ switch {
|
||
+ // special case: ![^text] == deferred footnote (that follows something with
|
||
+ // an exclamation point)
|
||
+ case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^':
|
||
+ t = linkDeferredFootnote
|
||
+ // ![alt] == image
|
||
+ case offset >= 0 && data[offset] == '!':
|
||
+ t = linkImg
|
||
+ offset++
|
||
+ // ^[text] == inline footnote
|
||
+ // [^refId] == deferred footnote
|
||
+ case p.extensions&Footnotes != 0:
|
||
+ if offset >= 0 && data[offset] == '^' {
|
||
+ t = linkInlineFootnote
|
||
+ offset++
|
||
+ } else if len(data)-1 > offset && data[offset+1] == '^' {
|
||
+ t = linkDeferredFootnote
|
||
+ }
|
||
+ // [text] == regular link
|
||
+ default:
|
||
+ t = linkNormal
|
||
+ }
|
||
+
|
||
+ data = data[offset:]
|
||
+
|
||
+ var (
|
||
+ i = 1
|
||
+ noteID int
|
||
+ title, link, altContent []byte
|
||
+ textHasNl = false
|
||
+ )
|
||
+
|
||
+ if t == linkDeferredFootnote {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // look for the matching closing bracket
|
||
+ for level := 1; level > 0 && i < len(data); i++ {
|
||
+ switch {
|
||
+ case data[i] == '\n':
|
||
+ textHasNl = true
|
||
+
|
||
+ case isBackslashEscaped(data, i):
|
||
+ continue
|
||
+
|
||
+ case data[i] == '[':
|
||
+ level++
|
||
+
|
||
+ case data[i] == ']':
|
||
+ level--
|
||
+ if level <= 0 {
|
||
+ i-- // compensate for extra i++ in for loop
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if i >= len(data) {
|
||
+ return 0, nil
|
||
+ }
|
||
+
|
||
+ txtE := i
|
||
+ i++
|
||
+ var footnoteNode *Node
|
||
+
|
||
+ // skip any amount of whitespace or newline
|
||
+ // (this is much more lax than original markdown syntax)
|
||
+ for i < len(data) && isspace(data[i]) {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // inline style link
|
||
+ switch {
|
||
+ case i < len(data) && data[i] == '(':
|
||
+ // skip initial whitespace
|
||
+ i++
|
||
+
|
||
+ for i < len(data) && isspace(data[i]) {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ linkB := i
|
||
+
|
||
+ // look for link end: ' " )
|
||
+ findlinkend:
|
||
+ for i < len(data) {
|
||
+ switch {
|
||
+ case data[i] == '\\':
|
||
+ i += 2
|
||
+
|
||
+ case data[i] == ')' || data[i] == '\'' || data[i] == '"':
|
||
+ break findlinkend
|
||
+
|
||
+ default:
|
||
+ i++
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if i >= len(data) {
|
||
+ return 0, nil
|
||
+ }
|
||
+ linkE := i
|
||
+
|
||
+ // look for title end if present
|
||
+ titleB, titleE := 0, 0
|
||
+ if data[i] == '\'' || data[i] == '"' {
|
||
+ i++
|
||
+ titleB = i
|
||
+
|
||
+ findtitleend:
|
||
+ for i < len(data) {
|
||
+ switch {
|
||
+ case data[i] == '\\':
|
||
+ i += 2
|
||
+
|
||
+ case data[i] == ')':
|
||
+ break findtitleend
|
||
+
|
||
+ default:
|
||
+ i++
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if i >= len(data) {
|
||
+ return 0, nil
|
||
+ }
|
||
+
|
||
+ // skip whitespace after title
|
||
+ titleE = i - 1
|
||
+ for titleE > titleB && isspace(data[titleE]) {
|
||
+ titleE--
|
||
+ }
|
||
+
|
||
+ // check for closing quote presence
|
||
+ if data[titleE] != '\'' && data[titleE] != '"' {
|
||
+ titleB, titleE = 0, 0
|
||
+ linkE = i
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // remove whitespace at the end of the link
|
||
+ for linkE > linkB && isspace(data[linkE-1]) {
|
||
+ linkE--
|
||
+ }
|
||
+
|
||
+ // remove optional angle brackets around the link
|
||
+ if data[linkB] == '<' {
|
||
+ linkB++
|
||
+ }
|
||
+ if data[linkE-1] == '>' {
|
||
+ linkE--
|
||
+ }
|
||
+
|
||
+ // build escaped link and title
|
||
+ if linkE > linkB {
|
||
+ link = data[linkB:linkE]
|
||
+ }
|
||
+
|
||
+ if titleE > titleB {
|
||
+ title = data[titleB:titleE]
|
||
+ }
|
||
+
|
||
+ i++
|
||
+
|
||
+ // reference style link
|
||
+ case isReferenceStyleLink(data, i, t):
|
||
+ var id []byte
|
||
+ altContentConsidered := false
|
||
+
|
||
+ // look for the id
|
||
+ i++
|
||
+ linkB := i
|
||
+ for i < len(data) && data[i] != ']' {
|
||
+ i++
|
||
+ }
|
||
+ if i >= len(data) {
|
||
+ return 0, nil
|
||
+ }
|
||
+ linkE := i
|
||
+
|
||
+ // find the reference
|
||
+ if linkB == linkE {
|
||
+ if textHasNl {
|
||
+ var b bytes.Buffer
|
||
+
|
||
+ for j := 1; j < txtE; j++ {
|
||
+ switch {
|
||
+ case data[j] != '\n':
|
||
+ b.WriteByte(data[j])
|
||
+ case data[j-1] != ' ':
|
||
+ b.WriteByte(' ')
|
||
+ }
|
||
+ }
|
||
+
|
||
+ id = b.Bytes()
|
||
+ } else {
|
||
+ id = data[1:txtE]
|
||
+ altContentConsidered = true
|
||
+ }
|
||
+ } else {
|
||
+ id = data[linkB:linkE]
|
||
+ }
|
||
+
|
||
+ // find the reference with matching id
|
||
+ lr, ok := p.getRef(string(id))
|
||
+ if !ok {
|
||
+ return 0, nil
|
||
+ }
|
||
+
|
||
+ // keep link and title from reference
|
||
+ link = lr.link
|
||
+ title = lr.title
|
||
+ if altContentConsidered {
|
||
+ altContent = lr.text
|
||
+ }
|
||
+ i++
|
||
+
|
||
+ // shortcut reference style link or reference or inline footnote
|
||
+ default:
|
||
+ var id []byte
|
||
+
|
||
+ // craft the id
|
||
+ if textHasNl {
|
||
+ var b bytes.Buffer
|
||
+
|
||
+ for j := 1; j < txtE; j++ {
|
||
+ switch {
|
||
+ case data[j] != '\n':
|
||
+ b.WriteByte(data[j])
|
||
+ case data[j-1] != ' ':
|
||
+ b.WriteByte(' ')
|
||
+ }
|
||
+ }
|
||
+
|
||
+ id = b.Bytes()
|
||
+ } else {
|
||
+ if t == linkDeferredFootnote {
|
||
+ id = data[2:txtE] // get rid of the ^
|
||
+ } else {
|
||
+ id = data[1:txtE]
|
||
+ }
|
||
+ }
|
||
+
|
||
+ footnoteNode = NewNode(Item)
|
||
+ if t == linkInlineFootnote {
|
||
+ // create a new reference
|
||
+ noteID = len(p.notes) + 1
|
||
+
|
||
+ var fragment []byte
|
||
+ if len(id) > 0 {
|
||
+ if len(id) < 16 {
|
||
+ fragment = make([]byte, len(id))
|
||
+ } else {
|
||
+ fragment = make([]byte, 16)
|
||
+ }
|
||
+ copy(fragment, slugify(id))
|
||
+ } else {
|
||
+ fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...)
|
||
+ }
|
||
+
|
||
+ ref := &reference{
|
||
+ noteID: noteID,
|
||
+ hasBlock: false,
|
||
+ link: fragment,
|
||
+ title: id,
|
||
+ footnote: footnoteNode,
|
||
+ }
|
||
+
|
||
+ p.notes = append(p.notes, ref)
|
||
+
|
||
+ link = ref.link
|
||
+ title = ref.title
|
||
+ } else {
|
||
+ // find the reference with matching id
|
||
+ lr, ok := p.getRef(string(id))
|
||
+ if !ok {
|
||
+ return 0, nil
|
||
+ }
|
||
+
|
||
+ if t == linkDeferredFootnote {
|
||
+ lr.noteID = len(p.notes) + 1
|
||
+ lr.footnote = footnoteNode
|
||
+ p.notes = append(p.notes, lr)
|
||
+ }
|
||
+
|
||
+ // keep link and title from reference
|
||
+ link = lr.link
|
||
+ // if inline footnote, title == footnote contents
|
||
+ title = lr.title
|
||
+ noteID = lr.noteID
|
||
+ }
|
||
+
|
||
+ // rewind the whitespace
|
||
+ i = txtE + 1
|
||
+ }
|
||
+
|
||
+ var uLink []byte
|
||
+ if t == linkNormal || t == linkImg {
|
||
+ if len(link) > 0 {
|
||
+ var uLinkBuf bytes.Buffer
|
||
+ unescapeText(&uLinkBuf, link)
|
||
+ uLink = uLinkBuf.Bytes()
|
||
+ }
|
||
+
|
||
+ // links need something to click on and somewhere to go
|
||
+ if len(uLink) == 0 || (t == linkNormal && txtE <= 1) {
|
||
+ return 0, nil
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // call the relevant rendering function
|
||
+ var linkNode *Node
|
||
+ switch t {
|
||
+ case linkNormal:
|
||
+ linkNode = NewNode(Link)
|
||
+ linkNode.Destination = normalizeURI(uLink)
|
||
+ linkNode.Title = title
|
||
+ if len(altContent) > 0 {
|
||
+ linkNode.AppendChild(text(altContent))
|
||
+ } else {
|
||
+ // links cannot contain other links, so turn off link parsing
|
||
+ // temporarily and recurse
|
||
+ insideLink := p.insideLink
|
||
+ p.insideLink = true
|
||
+ p.inline(linkNode, data[1:txtE])
|
||
+ p.insideLink = insideLink
|
||
+ }
|
||
+
|
||
+ case linkImg:
|
||
+ linkNode = NewNode(Image)
|
||
+ linkNode.Destination = uLink
|
||
+ linkNode.Title = title
|
||
+ linkNode.AppendChild(text(data[1:txtE]))
|
||
+ i++
|
||
+
|
||
+ case linkInlineFootnote, linkDeferredFootnote:
|
||
+ linkNode = NewNode(Link)
|
||
+ linkNode.Destination = link
|
||
+ linkNode.Title = title
|
||
+ linkNode.NoteID = noteID
|
||
+ linkNode.Footnote = footnoteNode
|
||
+ if t == linkInlineFootnote {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ default:
|
||
+ return 0, nil
|
||
+ }
|
||
+
|
||
+ return i, linkNode
|
||
+}
|
||
+
|
||
+func (p *Markdown) inlineHTMLComment(data []byte) int {
|
||
+ if len(data) < 5 {
|
||
+ return 0
|
||
+ }
|
||
+ if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' {
|
||
+ return 0
|
||
+ }
|
||
+ i := 5
|
||
+ // scan for an end-of-comment marker, across lines if necessary
|
||
+ for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') {
|
||
+ i++
|
||
+ }
|
||
+ // no end-of-comment marker
|
||
+ if i >= len(data) {
|
||
+ return 0
|
||
+ }
|
||
+ return i + 1
|
||
+}
|
||
+
|
||
+func stripMailto(link []byte) []byte {
|
||
+ if bytes.HasPrefix(link, []byte("mailto://")) {
|
||
+ return link[9:]
|
||
+ } else if bytes.HasPrefix(link, []byte("mailto:")) {
|
||
+ return link[7:]
|
||
+ } else {
|
||
+ return link
|
||
+ }
|
||
+}
|
||
+
|
||
+// autolinkType specifies a kind of autolink that gets detected.
|
||
+type autolinkType int
|
||
+
|
||
+// These are the possible flag values for the autolink renderer.
|
||
+const (
|
||
+ notAutolink autolinkType = iota
|
||
+ normalAutolink
|
||
+ emailAutolink
|
||
+)
|
||
+
|
||
+// '<' when tags or autolinks are allowed
|
||
+func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) {
|
||
+ data = data[offset:]
|
||
+ altype, end := tagLength(data)
|
||
+ if size := p.inlineHTMLComment(data); size > 0 {
|
||
+ end = size
|
||
+ }
|
||
+ if end > 2 {
|
||
+ if altype != notAutolink {
|
||
+ var uLink bytes.Buffer
|
||
+ unescapeText(&uLink, data[1:end+1-2])
|
||
+ if uLink.Len() > 0 {
|
||
+ link := uLink.Bytes()
|
||
+ node := NewNode(Link)
|
||
+ node.Destination = link
|
||
+ if altype == emailAutolink {
|
||
+ node.Destination = append([]byte("mailto:"), link...)
|
||
+ }
|
||
+ node.AppendChild(text(stripMailto(link)))
|
||
+ return end, node
|
||
+ }
|
||
+ } else {
|
||
+ htmlTag := NewNode(HTMLSpan)
|
||
+ htmlTag.Literal = data[:end]
|
||
+ return end, htmlTag
|
||
+ }
|
||
+ }
|
||
+
|
||
+ return end, nil
|
||
+}
|
||
+
|
||
+// '\\' backslash escape
|
||
+var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")
|
||
+
|
||
+func escape(p *Markdown, data []byte, offset int) (int, *Node) {
|
||
+ data = data[offset:]
|
||
+
|
||
+ if len(data) > 1 {
|
||
+ if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' {
|
||
+ return 2, NewNode(Hardbreak)
|
||
+ }
|
||
+ if bytes.IndexByte(escapeChars, data[1]) < 0 {
|
||
+ return 0, nil
|
||
+ }
|
||
+
|
||
+ return 2, text(data[1:2])
|
||
+ }
|
||
+
|
||
+ return 2, nil
|
||
+}
|
||
+
|
||
+func unescapeText(ob *bytes.Buffer, src []byte) {
|
||
+ i := 0
|
||
+ for i < len(src) {
|
||
+ org := i
|
||
+ for i < len(src) && src[i] != '\\' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ if i > org {
|
||
+ ob.Write(src[org:i])
|
||
+ }
|
||
+
|
||
+ if i+1 >= len(src) {
|
||
+ break
|
||
+ }
|
||
+
|
||
+ ob.WriteByte(src[i+1])
|
||
+ i += 2
|
||
+ }
|
||
+}
|
||
+
|
||
+// '&' escaped when it doesn't belong to an entity
|
||
+// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
|
||
+func entity(p *Markdown, data []byte, offset int) (int, *Node) {
|
||
+ data = data[offset:]
|
||
+
|
||
+ end := 1
|
||
+
|
||
+ if end < len(data) && data[end] == '#' {
|
||
+ end++
|
||
+ }
|
||
+
|
||
+ for end < len(data) && isalnum(data[end]) {
|
||
+ end++
|
||
+ }
|
||
+
|
||
+ if end < len(data) && data[end] == ';' {
|
||
+ end++ // real entity
|
||
+ } else {
|
||
+ return 0, nil // lone '&'
|
||
+ }
|
||
+
|
||
+ ent := data[:end]
|
||
+ // undo & escaping or it will be converted to &amp; by another
|
||
+ // escaper in the renderer
|
||
+ if bytes.Equal(ent, []byte("&")) {
|
||
+ ent = []byte{'&'}
|
||
+ }
|
||
+
|
||
+ return end, text(ent)
|
||
+}
|
||
+
|
||
+func linkEndsWithEntity(data []byte, linkEnd int) bool {
|
||
+ entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1)
|
||
+ return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd
|
||
+}
|
||
+
|
||
+// hasPrefixCaseInsensitive is a custom implementation of
|
||
+// strings.HasPrefix(strings.ToLower(s), prefix)
|
||
+// we rolled our own because ToLower pulls in a huge machinery of lowercasing
|
||
+// anything from Unicode and that's very slow. Since this func will only be
|
||
+// used on ASCII protocol prefixes, we can take shortcuts.
|
||
+func hasPrefixCaseInsensitive(s, prefix []byte) bool {
|
||
+ if len(s) < len(prefix) {
|
||
+ return false
|
||
+ }
|
||
+ delta := byte('a' - 'A')
|
||
+ for i, b := range prefix {
|
||
+ if b != s[i] && b != s[i]+delta {
|
||
+ return false
|
||
+ }
|
||
+ }
|
||
+ return true
|
||
+}
|
||
+
|
||
+var protocolPrefixes = [][]byte{
|
||
+ []byte("http://"),
|
||
+ []byte("https://"),
|
||
+ []byte("ftp://"),
|
||
+ []byte("file://"),
|
||
+ []byte("mailto:"),
|
||
+}
|
||
+
|
||
+const shortestPrefix = 6 // len("ftp://"), the shortest of the above
|
||
+
|
||
+func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) {
|
||
+ // quick check to rule out most false hits
|
||
+ if p.insideLink || len(data) < offset+shortestPrefix {
|
||
+ return 0, nil
|
||
+ }
|
||
+ for _, prefix := range protocolPrefixes {
|
||
+ endOfHead := offset + 8 // 8 is the len() of the longest prefix
|
||
+ if endOfHead > len(data) {
|
||
+ endOfHead = len(data)
|
||
+ }
|
||
+ if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) {
|
||
+ return autoLink(p, data, offset)
|
||
+ }
|
||
+ }
|
||
+ return 0, nil
|
||
+}
|
||
+
|
||
+func autoLink(p *Markdown, data []byte, offset int) (int, *Node) {
|
||
+ // Now a more expensive check to see if we're not inside an anchor element
|
||
+ anchorStart := offset
|
||
+ offsetFromAnchor := 0
|
||
+ for anchorStart > 0 && data[anchorStart] != '<' {
|
||
+ anchorStart--
|
||
+ offsetFromAnchor++
|
||
+ }
|
||
+
|
||
+ anchorStr := anchorRe.Find(data[anchorStart:])
|
||
+ if anchorStr != nil {
|
||
+ anchorClose := NewNode(HTMLSpan)
|
||
+ anchorClose.Literal = anchorStr[offsetFromAnchor:]
|
||
+ return len(anchorStr) - offsetFromAnchor, anchorClose
|
||
+ }
|
||
+
|
||
+ // scan backward for a word boundary
|
||
+ rewind := 0
|
||
+ for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) {
|
||
+ rewind++
|
||
+ }
|
||
+ if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters
|
||
+ return 0, nil
|
||
+ }
|
||
+
|
||
+ origData := data
|
||
+ data = data[offset-rewind:]
|
||
+
|
||
+ if !isSafeLink(data) {
|
||
+ return 0, nil
|
||
+ }
|
||
+
|
||
+ linkEnd := 0
|
||
+ for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) {
|
||
+ linkEnd++
|
||
+ }
|
||
+
|
||
+ // Skip punctuation at the end of the link
|
||
+ if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' {
|
||
+ linkEnd--
|
||
+ }
|
||
+
|
||
+ // But don't skip semicolon if it's a part of escaped entity:
|
||
+ if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) {
|
||
+ linkEnd--
|
||
+ }
|
||
+
|
||
+ // See if the link finishes with a punctuation sign that can be closed.
|
||
+ var copen byte
|
||
+ switch data[linkEnd-1] {
|
||
+ case '"':
|
||
+ copen = '"'
|
||
+ case '\'':
|
||
+ copen = '\''
|
||
+ case ')':
|
||
+ copen = '('
|
||
+ case ']':
|
||
+ copen = '['
|
||
+ case '}':
|
||
+ copen = '{'
|
||
+ default:
|
||
+ copen = 0
|
||
+ }
|
||
+
|
||
+ if copen != 0 {
|
||
+ bufEnd := offset - rewind + linkEnd - 2
|
||
+
|
||
+ openDelim := 1
|
||
+
|
||
+ /* Try to close the final punctuation sign in this same line;
|
||
+ * if we managed to close it outside of the URL, that means that it's
|
||
+ * not part of the URL. If it closes inside the URL, that means it
|
||
+ * is part of the URL.
|
||
+ *
|
||
+ * Examples:
|
||
+ *
|
||
+ * foo http://www.pokemon.com/Pikachu_(Electric) bar
|
||
+ * => http://www.pokemon.com/Pikachu_(Electric)
|
||
+ *
|
||
+ * foo (http://www.pokemon.com/Pikachu_(Electric)) bar
|
||
+ * => http://www.pokemon.com/Pikachu_(Electric)
|
||
+ *
|
||
+ * foo http://www.pokemon.com/Pikachu_(Electric)) bar
|
||
+ * => http://www.pokemon.com/Pikachu_(Electric))
|
||
+ *
|
||
+ * (foo http://www.pokemon.com/Pikachu_(Electric)) bar
|
||
+ * => foo http://www.pokemon.com/Pikachu_(Electric)
|
||
+ */
|
||
+
|
||
+ for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 {
|
||
+ if origData[bufEnd] == data[linkEnd-1] {
|
||
+ openDelim++
|
||
+ }
|
||
+
|
||
+ if origData[bufEnd] == copen {
|
||
+ openDelim--
|
||
+ }
|
||
+
|
||
+ bufEnd--
|
||
+ }
|
||
+
|
||
+ if openDelim == 0 {
|
||
+ linkEnd--
|
||
+ }
|
||
+ }
|
||
+
|
||
+ var uLink bytes.Buffer
|
||
+ unescapeText(&uLink, data[:linkEnd])
|
||
+
|
||
+ if uLink.Len() > 0 {
|
||
+ node := NewNode(Link)
|
||
+ node.Destination = uLink.Bytes()
|
||
+ node.AppendChild(text(uLink.Bytes()))
|
||
+ return linkEnd, node
|
||
+ }
|
||
+
|
||
+ return linkEnd, nil
|
||
+}
|
||
+
|
||
+func isEndOfLink(char byte) bool {
|
||
+ return isspace(char) || char == '<'
|
||
+}
|
||
+
|
||
+var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}
|
||
+var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}
|
||
+
|
||
+func isSafeLink(link []byte) bool {
|
||
+ for _, path := range validPaths {
|
||
+ if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) {
|
||
+ if len(link) == len(path) {
|
||
+ return true
|
||
+ } else if isalnum(link[len(path)]) {
|
||
+ return true
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ for _, prefix := range validUris {
|
||
+ // TODO: handle unicode here
|
||
+ // case-insensitive prefix test
|
||
+ if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) {
|
||
+ return true
|
||
+ }
|
||
+ }
|
||
+
|
||
+ return false
|
||
+}
|
||
+
|
||
+// return the length of the given tag, or 0 is it's not valid
|
||
+func tagLength(data []byte) (autolink autolinkType, end int) {
|
||
+ var i, j int
|
||
+
|
||
+ // a valid tag can't be shorter than 3 chars
|
||
+ if len(data) < 3 {
|
||
+ return notAutolink, 0
|
||
+ }
|
||
+
|
||
+ // begins with a '<' optionally followed by '/', followed by letter or number
|
||
+ if data[0] != '<' {
|
||
+ return notAutolink, 0
|
||
+ }
|
||
+ if data[1] == '/' {
|
||
+ i = 2
|
||
+ } else {
|
||
+ i = 1
|
||
+ }
|
||
+
|
||
+ if !isalnum(data[i]) {
|
||
+ return notAutolink, 0
|
||
+ }
|
||
+
|
||
+ // scheme test
|
||
+ autolink = notAutolink
|
||
+
|
||
+ // try to find the beginning of an URI
|
||
+ for i < len(data) && (isalnum(data[i]) || data[i] == '.' || data[i] == '+' || data[i] == '-') {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ if i > 1 && i < len(data) && data[i] == '@' {
|
||
+ if j = isMailtoAutoLink(data[i:]); j != 0 {
|
||
+ return emailAutolink, i + j
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if i > 2 && i < len(data) && data[i] == ':' {
|
||
+ autolink = normalAutolink
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // complete autolink test: no whitespace or ' or "
|
||
+ switch {
|
||
+ case i >= len(data):
|
||
+ autolink = notAutolink
|
||
+ case autolink != notAutolink:
|
||
+ j = i
|
||
+
|
||
+ for i < len(data) {
|
||
+ if data[i] == '\\' {
|
||
+ i += 2
|
||
+ } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) {
|
||
+ break
|
||
+ } else {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ }
|
||
+
|
||
+ if i >= len(data) {
|
||
+ return autolink, 0
|
||
+ }
|
||
+ if i > j && data[i] == '>' {
|
||
+ return autolink, i + 1
|
||
+ }
|
||
+
|
||
+ // one of the forbidden chars has been found
|
||
+ autolink = notAutolink
|
||
+ }
|
||
+ i += bytes.IndexByte(data[i:], '>')
|
||
+ if i < 0 {
|
||
+ return autolink, 0
|
||
+ }
|
||
+ return autolink, i + 1
|
||
+}
|
||
+
|
||
+// look for the address part of a mail autolink and '>'
|
||
+// this is less strict than the original markdown e-mail address matching
|
||
+func isMailtoAutoLink(data []byte) int {
|
||
+ nb := 0
|
||
+
|
||
+ // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@'
|
||
+ for i := 0; i < len(data); i++ {
|
||
+ if isalnum(data[i]) {
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ switch data[i] {
|
||
+ case '@':
|
||
+ nb++
|
||
+
|
||
+ case '-', '.', '_':
|
||
+ break
|
||
+
|
||
+ case '>':
|
||
+ if nb == 1 {
|
||
+ return i + 1
|
||
+ }
|
||
+ return 0
|
||
+ default:
|
||
+ return 0
|
||
+ }
|
||
+ }
|
||
+
|
||
+ return 0
|
||
+}
|
||
+
|
||
+// look for the next emph char, skipping other constructs
|
||
+func helperFindEmphChar(data []byte, c byte) int {
|
||
+ i := 0
|
||
+
|
||
+ for i < len(data) {
|
||
+ for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' {
|
||
+ i++
|
||
+ }
|
||
+ if i >= len(data) {
|
||
+ return 0
|
||
+ }
|
||
+ // do not count escaped chars
|
||
+ if i != 0 && data[i-1] == '\\' {
|
||
+ i++
|
||
+ continue
|
||
+ }
|
||
+ if data[i] == c {
|
||
+ return i
|
||
+ }
|
||
+
|
||
+ if data[i] == '`' {
|
||
+ // skip a code span
|
||
+ tmpI := 0
|
||
+ i++
|
||
+ for i < len(data) && data[i] != '`' {
|
||
+ if tmpI == 0 && data[i] == c {
|
||
+ tmpI = i
|
||
+ }
|
||
+ i++
|
||
+ }
|
||
+ if i >= len(data) {
|
||
+ return tmpI
|
||
+ }
|
||
+ i++
|
||
+ } else if data[i] == '[' {
|
||
+ // skip a link
|
||
+ tmpI := 0
|
||
+ i++
|
||
+ for i < len(data) && data[i] != ']' {
|
||
+ if tmpI == 0 && data[i] == c {
|
||
+ tmpI = i
|
||
+ }
|
||
+ i++
|
||
+ }
|
||
+ i++
|
||
+ for i < len(data) && (data[i] == ' ' || data[i] == '\n') {
|
||
+ i++
|
||
+ }
|
||
+ if i >= len(data) {
|
||
+ return tmpI
|
||
+ }
|
||
+ if data[i] != '[' && data[i] != '(' { // not a link
|
||
+ if tmpI > 0 {
|
||
+ return tmpI
|
||
+ }
|
||
+ continue
|
||
+ }
|
||
+ cc := data[i]
|
||
+ i++
|
||
+ for i < len(data) && data[i] != cc {
|
||
+ if tmpI == 0 && data[i] == c {
|
||
+ return i
|
||
+ }
|
||
+ i++
|
||
+ }
|
||
+ if i >= len(data) {
|
||
+ return tmpI
|
||
+ }
|
||
+ i++
|
||
+ }
|
||
+ }
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) {
|
||
+ i := 0
|
||
+
|
||
+ // skip one symbol if coming from emph3
|
||
+ if len(data) > 1 && data[0] == c && data[1] == c {
|
||
+ i = 1
|
||
+ }
|
||
+
|
||
+ for i < len(data) {
|
||
+ length := helperFindEmphChar(data[i:], c)
|
||
+ if length == 0 {
|
||
+ return 0, nil
|
||
+ }
|
||
+ i += length
|
||
+ if i >= len(data) {
|
||
+ return 0, nil
|
||
+ }
|
||
+
|
||
+ if i+1 < len(data) && data[i+1] == c {
|
||
+ i++
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ if data[i] == c && !isspace(data[i-1]) {
|
||
+
|
||
+ if p.extensions&NoIntraEmphasis != 0 {
|
||
+ if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) {
|
||
+ continue
|
||
+ }
|
||
+ }
|
||
+
|
||
+ emph := NewNode(Emph)
|
||
+ p.inline(emph, data[:i])
|
||
+ return i + 1, emph
|
||
+ }
|
||
+ }
|
||
+
|
||
+ return 0, nil
|
||
+}
|
||
+
|
||
+func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) {
|
||
+ i := 0
|
||
+
|
||
+ for i < len(data) {
|
||
+ length := helperFindEmphChar(data[i:], c)
|
||
+ if length == 0 {
|
||
+ return 0, nil
|
||
+ }
|
||
+ i += length
|
||
+
|
||
+ if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) {
|
||
+ nodeType := Strong
|
||
+ if c == '~' {
|
||
+ nodeType = Del
|
||
+ }
|
||
+ node := NewNode(nodeType)
|
||
+ p.inline(node, data[:i])
|
||
+ return i + 2, node
|
||
+ }
|
||
+ i++
|
||
+ }
|
||
+ return 0, nil
|
||
+}
|
||
+
|
||
+func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) {
|
||
+ i := 0
|
||
+ origData := data
|
||
+ data = data[offset:]
|
||
+
|
||
+ for i < len(data) {
|
||
+ length := helperFindEmphChar(data[i:], c)
|
||
+ if length == 0 {
|
||
+ return 0, nil
|
||
+ }
|
||
+ i += length
|
||
+
|
||
+ // skip whitespace preceded symbols
|
||
+ if data[i] != c || isspace(data[i-1]) {
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ switch {
|
||
+ case i+2 < len(data) && data[i+1] == c && data[i+2] == c:
|
||
+ // triple symbol found
|
||
+ strong := NewNode(Strong)
|
||
+ em := NewNode(Emph)
|
||
+ strong.AppendChild(em)
|
||
+ p.inline(em, data[:i])
|
||
+ return i + 3, strong
|
||
+ case (i+1 < len(data) && data[i+1] == c):
|
||
+ // double symbol found, hand over to emph1
|
||
+ length, node := helperEmphasis(p, origData[offset-2:], c)
|
||
+ if length == 0 {
|
||
+ return 0, nil
|
||
+ }
|
||
+ return length - 2, node
|
||
+ default:
|
||
+ // single symbol found, hand over to emph2
|
||
+ length, node := helperDoubleEmphasis(p, origData[offset-1:], c)
|
||
+ if length == 0 {
|
||
+ return 0, nil
|
||
+ }
|
||
+ return length - 1, node
|
||
+ }
|
||
+ }
|
||
+ return 0, nil
|
||
+}
|
||
+
|
||
+func text(s []byte) *Node {
|
||
+ node := NewNode(Text)
|
||
+ node.Literal = s
|
||
+ return node
|
||
+}
|
||
+
|
||
+func normalizeURI(s []byte) []byte {
|
||
+ return s // TODO: implement
|
||
+}
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go
|
||
new file mode 100644
|
||
index 000000000000..58d2e4538c62
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/russross/blackfriday/v2/markdown.go
|
||
@@ -0,0 +1,950 @@
|
||
+// Blackfriday Markdown Processor
|
||
+// Available at http://github.com/russross/blackfriday
|
||
+//
|
||
+// Copyright © 2011 Russ Ross <russ@russross.com>.
|
||
+// Distributed under the Simplified BSD License.
|
||
+// See README.md for details.
|
||
+
|
||
+package blackfriday
|
||
+
|
||
+import (
|
||
+ "bytes"
|
||
+ "fmt"
|
||
+ "io"
|
||
+ "strings"
|
||
+ "unicode/utf8"
|
||
+)
|
||
+
|
||
+//
|
||
+// Markdown parsing and processing
|
||
+//
|
||
+
|
||
+// Version string of the package. Appears in the rendered document when
|
||
+// CompletePage flag is on.
|
||
+const Version = "2.0"
|
||
+
|
||
+// Extensions is a bitwise or'ed collection of enabled Blackfriday's
|
||
+// extensions.
|
||
+type Extensions int
|
||
+
|
||
+// These are the supported markdown parsing extensions.
|
||
+// OR these values together to select multiple extensions.
|
||
+const (
|
||
+ NoExtensions Extensions = 0
|
||
+ NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words
|
||
+ Tables // Render tables
|
||
+ FencedCode // Render fenced code blocks
|
||
+ Autolink // Detect embedded URLs that are not explicitly marked
|
||
+ Strikethrough // Strikethrough text using ~~test~~
|
||
+ LaxHTMLBlocks // Loosen up HTML block parsing rules
|
||
+ SpaceHeadings // Be strict about prefix heading rules
|
||
+ HardLineBreak // Translate newlines into line breaks
|
||
+ TabSizeEight // Expand tabs to eight spaces instead of four
|
||
+ Footnotes // Pandoc-style footnotes
|
||
+ NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
|
||
+ HeadingIDs // specify heading IDs with {#id}
|
||
+ Titleblock // Titleblock ala pandoc
|
||
+ AutoHeadingIDs // Create the heading ID from the text
|
||
+ BackslashLineBreak // Translate trailing backslashes into line breaks
|
||
+ DefinitionLists // Render definition lists
|
||
+
|
||
+ CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants |
|
||
+ SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes
|
||
+
|
||
+ CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode |
|
||
+ Autolink | Strikethrough | SpaceHeadings | HeadingIDs |
|
||
+ BackslashLineBreak | DefinitionLists
|
||
+)
|
||
+
|
||
+// ListType contains bitwise or'ed flags for list and list item objects.
|
||
+type ListType int
|
||
+
|
||
+// These are the possible flag values for the ListItem renderer.
|
||
+// Multiple flag values may be ORed together.
|
||
+// These are mostly of interest if you are writing a new output format.
|
||
+const (
|
||
+ ListTypeOrdered ListType = 1 << iota
|
||
+ ListTypeDefinition
|
||
+ ListTypeTerm
|
||
+
|
||
+ ListItemContainsBlock
|
||
+ ListItemBeginningOfList // TODO: figure out if this is of any use now
|
||
+ ListItemEndOfList
|
||
+)
|
||
+
|
||
+// CellAlignFlags holds a type of alignment in a table cell.
|
||
+type CellAlignFlags int
|
||
+
|
||
+// These are the possible flag values for the table cell renderer.
|
||
+// Only a single one of these values will be used; they are not ORed together.
|
||
+// These are mostly of interest if you are writing a new output format.
|
||
+const (
|
||
+ TableAlignmentLeft CellAlignFlags = 1 << iota
|
||
+ TableAlignmentRight
|
||
+ TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight)
|
||
+)
|
||
+
|
||
+// The size of a tab stop.
|
||
+const (
|
||
+ TabSizeDefault = 4
|
||
+ TabSizeDouble = 8
|
||
+)
|
||
+
|
||
+// blockTags is a set of tags that are recognized as HTML block tags.
|
||
+// Any of these can be included in markdown text without special escaping.
|
||
+var blockTags = map[string]struct{}{
|
||
+ "blockquote": {},
|
||
+ "del": {},
|
||
+ "div": {},
|
||
+ "dl": {},
|
||
+ "fieldset": {},
|
||
+ "form": {},
|
||
+ "h1": {},
|
||
+ "h2": {},
|
||
+ "h3": {},
|
||
+ "h4": {},
|
||
+ "h5": {},
|
||
+ "h6": {},
|
||
+ "iframe": {},
|
||
+ "ins": {},
|
||
+ "math": {},
|
||
+ "noscript": {},
|
||
+ "ol": {},
|
||
+ "pre": {},
|
||
+ "p": {},
|
||
+ "script": {},
|
||
+ "style": {},
|
||
+ "table": {},
|
||
+ "ul": {},
|
||
+
|
||
+ // HTML5
|
||
+ "address": {},
|
||
+ "article": {},
|
||
+ "aside": {},
|
||
+ "canvas": {},
|
||
+ "figcaption": {},
|
||
+ "figure": {},
|
||
+ "footer": {},
|
||
+ "header": {},
|
||
+ "hgroup": {},
|
||
+ "main": {},
|
||
+ "nav": {},
|
||
+ "output": {},
|
||
+ "progress": {},
|
||
+ "section": {},
|
||
+ "video": {},
|
||
+}
|
||
+
|
||
+// Renderer is the rendering interface. This is mostly of interest if you are
|
||
+// implementing a new rendering format.
|
||
+//
|
||
+// Only an HTML implementation is provided in this repository, see the README
|
||
+// for external implementations.
|
||
+type Renderer interface {
|
||
+ // RenderNode is the main rendering method. It will be called once for
|
||
+ // every leaf node and twice for every non-leaf node (first with
|
||
+ // entering=true, then with entering=false). The method should write its
|
||
+ // rendition of the node to the supplied writer w.
|
||
+ RenderNode(w io.Writer, node *Node, entering bool) WalkStatus
|
||
+
|
||
+ // RenderHeader is a method that allows the renderer to produce some
|
||
+ // content preceding the main body of the output document. The header is
|
||
+ // understood in the broad sense here. For example, the default HTML
|
||
+ // renderer will write not only the HTML document preamble, but also the
|
||
+ // table of contents if it was requested.
|
||
+ //
|
||
+ // The method will be passed an entire document tree, in case a particular
|
||
+ // implementation needs to inspect it to produce output.
|
||
+ //
|
||
+ // The output should be written to the supplied writer w. If your
|
||
+ // implementation has no header to write, supply an empty implementation.
|
||
+ RenderHeader(w io.Writer, ast *Node)
|
||
+
|
||
+ // RenderFooter is a symmetric counterpart of RenderHeader.
|
||
+ RenderFooter(w io.Writer, ast *Node)
|
||
+}
|
||
+
|
||
+// Callback functions for inline parsing. One such function is defined
|
||
+// for each character that triggers a response when parsing inline data.
|
||
+type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node)
|
||
+
|
||
+// Markdown is a type that holds extensions and the runtime state used by
|
||
+// Parse, and the renderer. You can not use it directly, construct it with New.
|
||
+type Markdown struct {
|
||
+ renderer Renderer
|
||
+ referenceOverride ReferenceOverrideFunc
|
||
+ refs map[string]*reference
|
||
+ inlineCallback [256]inlineParser
|
||
+ extensions Extensions
|
||
+ nesting int
|
||
+ maxNesting int
|
||
+ insideLink bool
|
||
+
|
||
+ // Footnotes need to be ordered as well as available to quickly check for
|
||
+ // presence. If a ref is also a footnote, it's stored both in refs and here
|
||
+ // in notes. Slice is nil if footnotes not enabled.
|
||
+ notes []*reference
|
||
+
|
||
+ doc *Node
|
||
+ tip *Node // = doc
|
||
+ oldTip *Node
|
||
+ lastMatchedContainer *Node // = doc
|
||
+ allClosed bool
|
||
+}
|
||
+
|
||
+func (p *Markdown) getRef(refid string) (ref *reference, found bool) {
|
||
+ if p.referenceOverride != nil {
|
||
+ r, overridden := p.referenceOverride(refid)
|
||
+ if overridden {
|
||
+ if r == nil {
|
||
+ return nil, false
|
||
+ }
|
||
+ return &reference{
|
||
+ link: []byte(r.Link),
|
||
+ title: []byte(r.Title),
|
||
+ noteID: 0,
|
||
+ hasBlock: false,
|
||
+ text: []byte(r.Text)}, true
|
||
+ }
|
||
+ }
|
||
+ // refs are case insensitive
|
||
+ ref, found = p.refs[strings.ToLower(refid)]
|
||
+ return ref, found
|
||
+}
|
||
+
|
||
+func (p *Markdown) finalize(block *Node) {
|
||
+ above := block.Parent
|
||
+ block.open = false
|
||
+ p.tip = above
|
||
+}
|
||
+
|
||
+func (p *Markdown) addChild(node NodeType, offset uint32) *Node {
|
||
+ return p.addExistingChild(NewNode(node), offset)
|
||
+}
|
||
+
|
||
+func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node {
|
||
+ for !p.tip.canContain(node.Type) {
|
||
+ p.finalize(p.tip)
|
||
+ }
|
||
+ p.tip.AppendChild(node)
|
||
+ p.tip = node
|
||
+ return node
|
||
+}
|
||
+
|
||
+func (p *Markdown) closeUnmatchedBlocks() {
|
||
+ if !p.allClosed {
|
||
+ for p.oldTip != p.lastMatchedContainer {
|
||
+ parent := p.oldTip.Parent
|
||
+ p.finalize(p.oldTip)
|
||
+ p.oldTip = parent
|
||
+ }
|
||
+ p.allClosed = true
|
||
+ }
|
||
+}
|
||
+
|
||
+//
|
||
+//
|
||
+// Public interface
|
||
+//
|
||
+//
|
||
+
|
||
+// Reference represents the details of a link.
|
||
+// See the documentation in Options for more details on use-case.
|
||
+type Reference struct {
|
||
+ // Link is usually the URL the reference points to.
|
||
+ Link string
|
||
+ // Title is the alternate text describing the link in more detail.
|
||
+ Title string
|
||
+ // Text is the optional text to override the ref with if the syntax used was
|
||
+ // [refid][]
|
||
+ Text string
|
||
+}
|
||
+
|
||
+// ReferenceOverrideFunc is expected to be called with a reference string and
|
||
+// return either a valid Reference type that the reference string maps to or
|
||
+// nil. If overridden is false, the default reference logic will be executed.
|
||
+// See the documentation in Options for more details on use-case.
|
||
+type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)
|
||
+
|
||
+// New constructs a Markdown processor. You can use the same With* functions as
|
||
+// for Run() to customize parser's behavior and the renderer.
|
||
+func New(opts ...Option) *Markdown {
|
||
+ var p Markdown
|
||
+ for _, opt := range opts {
|
||
+ opt(&p)
|
||
+ }
|
||
+ p.refs = make(map[string]*reference)
|
||
+ p.maxNesting = 16
|
||
+ p.insideLink = false
|
||
+ docNode := NewNode(Document)
|
||
+ p.doc = docNode
|
||
+ p.tip = docNode
|
||
+ p.oldTip = docNode
|
||
+ p.lastMatchedContainer = docNode
|
||
+ p.allClosed = true
|
||
+ // register inline parsers
|
||
+ p.inlineCallback[' '] = maybeLineBreak
|
||
+ p.inlineCallback['*'] = emphasis
|
||
+ p.inlineCallback['_'] = emphasis
|
||
+ if p.extensions&Strikethrough != 0 {
|
||
+ p.inlineCallback['~'] = emphasis
|
||
+ }
|
||
+ p.inlineCallback['`'] = codeSpan
|
||
+ p.inlineCallback['\n'] = lineBreak
|
||
+ p.inlineCallback['['] = link
|
||
+ p.inlineCallback['<'] = leftAngle
|
||
+ p.inlineCallback['\\'] = escape
|
||
+ p.inlineCallback['&'] = entity
|
||
+ p.inlineCallback['!'] = maybeImage
|
||
+ p.inlineCallback['^'] = maybeInlineFootnote
|
||
+ if p.extensions&Autolink != 0 {
|
||
+ p.inlineCallback['h'] = maybeAutoLink
|
||
+ p.inlineCallback['m'] = maybeAutoLink
|
||
+ p.inlineCallback['f'] = maybeAutoLink
|
||
+ p.inlineCallback['H'] = maybeAutoLink
|
||
+ p.inlineCallback['M'] = maybeAutoLink
|
||
+ p.inlineCallback['F'] = maybeAutoLink
|
||
+ }
|
||
+ if p.extensions&Footnotes != 0 {
|
||
+ p.notes = make([]*reference, 0)
|
||
+ }
|
||
+ return &p
|
||
+}
|
||
+
|
||
+// Option customizes the Markdown processor's default behavior.
|
||
+type Option func(*Markdown)
|
||
+
|
||
+// WithRenderer allows you to override the default renderer.
|
||
+func WithRenderer(r Renderer) Option {
|
||
+ return func(p *Markdown) {
|
||
+ p.renderer = r
|
||
+ }
|
||
+}
|
||
+
|
||
+// WithExtensions allows you to pick some of the many extensions provided by
|
||
+// Blackfriday. You can bitwise OR them.
|
||
+func WithExtensions(e Extensions) Option {
|
||
+ return func(p *Markdown) {
|
||
+ p.extensions = e
|
||
+ }
|
||
+}
|
||
+
|
||
+// WithNoExtensions turns off all extensions and custom behavior.
|
||
+func WithNoExtensions() Option {
|
||
+ return func(p *Markdown) {
|
||
+ p.extensions = NoExtensions
|
||
+ p.renderer = NewHTMLRenderer(HTMLRendererParameters{
|
||
+ Flags: HTMLFlagsNone,
|
||
+ })
|
||
+ }
|
||
+}
|
||
+
|
||
+// WithRefOverride sets an optional function callback that is called every
|
||
+// time a reference is resolved.
|
||
+//
|
||
+// In Markdown, the link reference syntax can be made to resolve a link to
|
||
+// a reference instead of an inline URL, in one of the following ways:
|
||
+//
|
||
+// * [link text][refid]
|
||
+// * [refid][]
|
||
+//
|
||
+// Usually, the refid is defined at the bottom of the Markdown document. If
|
||
+// this override function is provided, the refid is passed to the override
|
||
+// function first, before consulting the defined refids at the bottom. If
|
||
+// the override function indicates an override did not occur, the refids at
|
||
+// the bottom will be used to fill in the link details.
|
||
+func WithRefOverride(o ReferenceOverrideFunc) Option {
|
||
+ return func(p *Markdown) {
|
||
+ p.referenceOverride = o
|
||
+ }
|
||
+}
|
||
+
|
||
+// Run is the main entry point to Blackfriday. It parses and renders a
|
||
+// block of markdown-encoded text.
|
||
+//
|
||
+// The simplest invocation of Run takes one argument, input:
|
||
+// output := Run(input)
|
||
+// This will parse the input with CommonExtensions enabled and render it with
|
||
+// the default HTMLRenderer (with CommonHTMLFlags).
|
||
+//
|
||
+// Variadic arguments opts can customize the default behavior. Since Markdown
|
||
+// type does not contain exported fields, you can not use it directly. Instead,
|
||
+// use the With* functions. For example, this will call the most basic
|
||
+// functionality, with no extensions:
|
||
+// output := Run(input, WithNoExtensions())
|
||
+//
|
||
+// You can use any number of With* arguments, even contradicting ones. They
|
||
+// will be applied in order of appearance and the latter will override the
|
||
+// former:
|
||
+// output := Run(input, WithNoExtensions(), WithExtensions(exts),
|
||
+// WithRenderer(yourRenderer))
|
||
+func Run(input []byte, opts ...Option) []byte {
|
||
+ r := NewHTMLRenderer(HTMLRendererParameters{
|
||
+ Flags: CommonHTMLFlags,
|
||
+ })
|
||
+ optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)}
|
||
+ optList = append(optList, opts...)
|
||
+ parser := New(optList...)
|
||
+ ast := parser.Parse(input)
|
||
+ var buf bytes.Buffer
|
||
+ parser.renderer.RenderHeader(&buf, ast)
|
||
+ ast.Walk(func(node *Node, entering bool) WalkStatus {
|
||
+ return parser.renderer.RenderNode(&buf, node, entering)
|
||
+ })
|
||
+ parser.renderer.RenderFooter(&buf, ast)
|
||
+ return buf.Bytes()
|
||
+}
|
||
+
|
||
+// Parse is an entry point to the parsing part of Blackfriday. It takes an
|
||
+// input markdown document and produces a syntax tree for its contents. This
|
||
+// tree can then be rendered with a default or custom renderer, or
|
||
+// analyzed/transformed by the caller to whatever non-standard needs they have.
|
||
+// The return value is the root node of the syntax tree.
|
||
+func (p *Markdown) Parse(input []byte) *Node {
|
||
+ p.block(input)
|
||
+ // Walk the tree and finish up some of unfinished blocks
|
||
+ for p.tip != nil {
|
||
+ p.finalize(p.tip)
|
||
+ }
|
||
+ // Walk the tree again and process inline markdown in each block
|
||
+ p.doc.Walk(func(node *Node, entering bool) WalkStatus {
|
||
+ if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell {
|
||
+ p.inline(node, node.content)
|
||
+ node.content = nil
|
||
+ }
|
||
+ return GoToNext
|
||
+ })
|
||
+ p.parseRefsToAST()
|
||
+ return p.doc
|
||
+}
|
||
+
|
||
+func (p *Markdown) parseRefsToAST() {
|
||
+ if p.extensions&Footnotes == 0 || len(p.notes) == 0 {
|
||
+ return
|
||
+ }
|
||
+ p.tip = p.doc
|
||
+ block := p.addBlock(List, nil)
|
||
+ block.IsFootnotesList = true
|
||
+ block.ListFlags = ListTypeOrdered
|
||
+ flags := ListItemBeginningOfList
|
||
+ // Note: this loop is intentionally explicit, not range-form. This is
|
||
+ // because the body of the loop will append nested footnotes to p.notes and
|
||
+ // we need to process those late additions. Range form would only walk over
|
||
+ // the fixed initial set.
|
||
+ for i := 0; i < len(p.notes); i++ {
|
||
+ ref := p.notes[i]
|
||
+ p.addExistingChild(ref.footnote, 0)
|
||
+ block := ref.footnote
|
||
+ block.ListFlags = flags | ListTypeOrdered
|
||
+ block.RefLink = ref.link
|
||
+ if ref.hasBlock {
|
||
+ flags |= ListItemContainsBlock
|
||
+ p.block(ref.title)
|
||
+ } else {
|
||
+ p.inline(block, ref.title)
|
||
+ }
|
||
+ flags &^= ListItemBeginningOfList | ListItemContainsBlock
|
||
+ }
|
||
+ above := block.Parent
|
||
+ finalizeList(block)
|
||
+ p.tip = above
|
||
+ block.Walk(func(node *Node, entering bool) WalkStatus {
|
||
+ if node.Type == Paragraph || node.Type == Heading {
|
||
+ p.inline(node, node.content)
|
||
+ node.content = nil
|
||
+ }
|
||
+ return GoToNext
|
||
+ })
|
||
+}
|
||
+
|
||
+//
|
||
+// Link references
|
||
+//
|
||
+// This section implements support for references that (usually) appear
|
||
+// as footnotes in a document, and can be referenced anywhere in the document.
|
||
+// The basic format is:
|
||
+//
|
||
+// [1]: http://www.google.com/ "Google"
|
||
+// [2]: http://www.github.com/ "Github"
|
||
+//
|
||
+// Anywhere in the document, the reference can be linked by referring to its
|
||
+// label, i.e., 1 and 2 in this example, as in:
|
||
+//
|
||
+// This library is hosted on [Github][2], a git hosting site.
|
||
+//
|
||
+// Actual footnotes as specified in Pandoc and supported by some other Markdown
|
||
+// libraries such as php-markdown are also taken care of. They look like this:
|
||
+//
|
||
+// This sentence needs a bit of further explanation.[^note]
|
||
+//
|
||
+// [^note]: This is the explanation.
|
||
+//
|
||
+// Footnotes should be placed at the end of the document in an ordered list.
|
||
+// Finally, there are inline footnotes such as:
|
||
+//
|
||
+// Inline footnotes^[Also supported.] provide a quick inline explanation,
|
||
+// but are rendered at the bottom of the document.
|
||
+//
|
||
+
|
||
+// reference holds all information necessary for a reference-style links or
|
||
+// footnotes.
|
||
+//
|
||
+// Consider this markdown with reference-style links:
|
||
+//
|
||
+// [link][ref]
|
||
+//
|
||
+// [ref]: /url/ "tooltip title"
|
||
+//
|
||
+// It will be ultimately converted to this HTML:
|
||
+//
|
||
+// <p><a href=\"/url/\" title=\"title\">link</a></p>
|
||
+//
|
||
+// And a reference structure will be populated as follows:
|
||
+//
|
||
+// p.refs["ref"] = &reference{
|
||
+// link: "/url/",
|
||
+// title: "tooltip title",
|
||
+// }
|
||
+//
|
||
+// Alternatively, reference can contain information about a footnote. Consider
|
||
+// this markdown:
|
||
+//
|
||
+// Text needing a footnote.[^a]
|
||
+//
|
||
+// [^a]: This is the note
|
||
+//
|
||
+// A reference structure will be populated as follows:
|
||
+//
|
||
+// p.refs["a"] = &reference{
|
||
+// link: "a",
|
||
+// title: "This is the note",
|
||
+// noteID: <some positive int>,
|
||
+// }
|
||
+//
|
||
+// TODO: As you can see, it begs for splitting into two dedicated structures
|
||
+// for refs and for footnotes.
|
||
+type reference struct {
|
||
+ link []byte
|
||
+ title []byte
|
||
+ noteID int // 0 if not a footnote ref
|
||
+ hasBlock bool
|
||
+ footnote *Node // a link to the Item node within a list of footnotes
|
||
+
|
||
+ text []byte // only gets populated by refOverride feature with Reference.Text
|
||
+}
|
||
+
|
||
+func (r *reference) String() string {
|
||
+ return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}",
|
||
+ r.link, r.title, r.text, r.noteID, r.hasBlock)
|
||
+}
|
||
+
|
||
+// Check whether or not data starts with a reference link.
|
||
+// If so, it is parsed and stored in the list of references
|
||
+// (in the render struct).
|
||
+// Returns the number of bytes to skip to move past it,
|
||
+// or zero if the first line is not a reference.
|
||
+func isReference(p *Markdown, data []byte, tabSize int) int {
|
||
+ // up to 3 optional leading spaces
|
||
+ if len(data) < 4 {
|
||
+ return 0
|
||
+ }
|
||
+ i := 0
|
||
+ for i < 3 && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ noteID := 0
|
||
+
|
||
+ // id part: anything but a newline between brackets
|
||
+ if data[i] != '[' {
|
||
+ return 0
|
||
+ }
|
||
+ i++
|
||
+ if p.extensions&Footnotes != 0 {
|
||
+ if i < len(data) && data[i] == '^' {
|
||
+ // we can set it to anything here because the proper noteIds will
|
||
+ // be assigned later during the second pass. It just has to be != 0
|
||
+ noteID = 1
|
||
+ i++
|
||
+ }
|
||
+ }
|
||
+ idOffset := i
|
||
+ for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
|
||
+ i++
|
||
+ }
|
||
+ if i >= len(data) || data[i] != ']' {
|
||
+ return 0
|
||
+ }
|
||
+ idEnd := i
|
||
+ // footnotes can have empty ID, like this: [^], but a reference can not be
|
||
+ // empty like this: []. Break early if it's not a footnote and there's no ID
|
||
+ if noteID == 0 && idOffset == idEnd {
|
||
+ return 0
|
||
+ }
|
||
+ // spacer: colon (space | tab)* newline? (space | tab)*
|
||
+ i++
|
||
+ if i >= len(data) || data[i] != ':' {
|
||
+ return 0
|
||
+ }
|
||
+ i++
|
||
+ for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
||
+ i++
|
||
+ }
|
||
+ if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
|
||
+ i++
|
||
+ if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
|
||
+ i++
|
||
+ }
|
||
+ }
|
||
+ for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
||
+ i++
|
||
+ }
|
||
+ if i >= len(data) {
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ var (
|
||
+ linkOffset, linkEnd int
|
||
+ titleOffset, titleEnd int
|
||
+ lineEnd int
|
||
+ raw []byte
|
||
+ hasBlock bool
|
||
+ )
|
||
+
|
||
+ if p.extensions&Footnotes != 0 && noteID != 0 {
|
||
+ linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
|
||
+ lineEnd = linkEnd
|
||
+ } else {
|
||
+ linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
|
||
+ }
|
||
+ if lineEnd == 0 {
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ // a valid ref has been found
|
||
+
|
||
+ ref := &reference{
|
||
+ noteID: noteID,
|
||
+ hasBlock: hasBlock,
|
||
+ }
|
||
+
|
||
+ if noteID > 0 {
|
||
+ // reusing the link field for the id since footnotes don't have links
|
||
+ ref.link = data[idOffset:idEnd]
|
||
+ // if footnote, it's not really a title, it's the contained text
|
||
+ ref.title = raw
|
||
+ } else {
|
||
+ ref.link = data[linkOffset:linkEnd]
|
||
+ ref.title = data[titleOffset:titleEnd]
|
||
+ }
|
||
+
|
||
+ // id matches are case-insensitive
|
||
+ id := string(bytes.ToLower(data[idOffset:idEnd]))
|
||
+
|
||
+ p.refs[id] = ref
|
||
+
|
||
+ return lineEnd
|
||
+}
|
||
+
|
||
+func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
|
||
+ // link: whitespace-free sequence, optionally between angle brackets
|
||
+ if data[i] == '<' {
|
||
+ i++
|
||
+ }
|
||
+ linkOffset = i
|
||
+ for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
|
||
+ i++
|
||
+ }
|
||
+ linkEnd = i
|
||
+ if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
|
||
+ linkOffset++
|
||
+ linkEnd--
|
||
+ }
|
||
+
|
||
+ // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
|
||
+ for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
||
+ i++
|
||
+ }
|
||
+ if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ // compute end-of-line
|
||
+ if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
|
||
+ lineEnd = i
|
||
+ }
|
||
+ if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
|
||
+ lineEnd++
|
||
+ }
|
||
+
|
||
+ // optional (space|tab)* spacer after a newline
|
||
+ if lineEnd > 0 {
|
||
+ i = lineEnd + 1
|
||
+ for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
|
||
+ i++
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // optional title: any non-newline sequence enclosed in '"() alone on its line
|
||
+ if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
|
||
+ i++
|
||
+ titleOffset = i
|
||
+
|
||
+ // look for EOL
|
||
+ for i < len(data) && data[i] != '\n' && data[i] != '\r' {
|
||
+ i++
|
||
+ }
|
||
+ if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
|
||
+ titleEnd = i + 1
|
||
+ } else {
|
||
+ titleEnd = i
|
||
+ }
|
||
+
|
||
+ // step back
|
||
+ i--
|
||
+ for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
|
||
+ i--
|
||
+ }
|
||
+ if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
|
||
+ lineEnd = titleEnd
|
||
+ titleEnd = i
|
||
+ }
|
||
+ }
|
||
+
|
||
+ return
|
||
+}
|
||
+
|
||
+// The first bit of this logic is the same as Parser.listItem, but the rest
|
||
+// is much simpler. This function simply finds the entire block and shifts it
|
||
+// over by one tab if it is indeed a block (just returns the line if it's not).
|
||
+// blockEnd is the end of the section in the input buffer, and contents is the
|
||
+// extracted text that was shifted over one tab. It will need to be rendered at
|
||
+// the end of the document.
|
||
+func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
|
||
+ if i == 0 || len(data) == 0 {
|
||
+ return
|
||
+ }
|
||
+
|
||
+ // skip leading whitespace on first line
|
||
+ for i < len(data) && data[i] == ' ' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ blockStart = i
|
||
+
|
||
+ // find the end of the line
|
||
+ blockEnd = i
|
||
+ for i < len(data) && data[i-1] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // get working buffer
|
||
+ var raw bytes.Buffer
|
||
+
|
||
+ // put the first line into the working buffer
|
||
+ raw.Write(data[blockEnd:i])
|
||
+ blockEnd = i
|
||
+
|
||
+ // process the following lines
|
||
+ containsBlankLine := false
|
||
+
|
||
+gatherLines:
|
||
+ for blockEnd < len(data) {
|
||
+ i++
|
||
+
|
||
+ // find the end of this line
|
||
+ for i < len(data) && data[i-1] != '\n' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ // if it is an empty line, guess that it is part of this item
|
||
+ // and move on to the next line
|
||
+ if p.isEmpty(data[blockEnd:i]) > 0 {
|
||
+ containsBlankLine = true
|
||
+ blockEnd = i
|
||
+ continue
|
||
+ }
|
||
+
|
||
+ n := 0
|
||
+ if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
|
||
+ // this is the end of the block.
|
||
+ // we don't want to include this last line in the index.
|
||
+ break gatherLines
|
||
+ }
|
||
+
|
||
+ // if there were blank lines before this one, insert a new one now
|
||
+ if containsBlankLine {
|
||
+ raw.WriteByte('\n')
|
||
+ containsBlankLine = false
|
||
+ }
|
||
+
|
||
+ // get rid of that first tab, write to buffer
|
||
+ raw.Write(data[blockEnd+n : i])
|
||
+ hasBlock = true
|
||
+
|
||
+ blockEnd = i
|
||
+ }
|
||
+
|
||
+ if data[blockEnd-1] != '\n' {
|
||
+ raw.WriteByte('\n')
|
||
+ }
|
||
+
|
||
+ contents = raw.Bytes()
|
||
+
|
||
+ return
|
||
+}
|
||
+
|
||
+//
|
||
+//
|
||
+// Miscellaneous helper functions
|
||
+//
|
||
+//
|
||
+
|
||
+// Test if a character is a punctuation symbol.
|
||
+// Taken from a private function in regexp in the stdlib.
|
||
+func ispunct(c byte) bool {
|
||
+ for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
|
||
+ if c == r {
|
||
+ return true
|
||
+ }
|
||
+ }
|
||
+ return false
|
||
+}
|
||
+
|
||
+// Test if a character is a whitespace character.
|
||
+func isspace(c byte) bool {
|
||
+ return ishorizontalspace(c) || isverticalspace(c)
|
||
+}
|
||
+
|
||
+// Test if a character is a horizontal whitespace character.
|
||
+func ishorizontalspace(c byte) bool {
|
||
+ return c == ' ' || c == '\t'
|
||
+}
|
||
+
|
||
+// Test if a character is a vertical character.
|
||
+func isverticalspace(c byte) bool {
|
||
+ return c == '\n' || c == '\r' || c == '\f' || c == '\v'
|
||
+}
|
||
+
|
||
+// Test if a character is letter.
|
||
+func isletter(c byte) bool {
|
||
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
|
||
+}
|
||
+
|
||
+// Test if a character is a letter or a digit.
|
||
+// TODO: check when this is looking for ASCII alnum and when it should use unicode
|
||
+func isalnum(c byte) bool {
|
||
+ return (c >= '0' && c <= '9') || isletter(c)
|
||
+}
|
||
+
|
||
+// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
|
||
+// always ends output with a newline
|
||
+func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
|
||
+ // first, check for common cases: no tabs, or only tabs at beginning of line
|
||
+ i, prefix := 0, 0
|
||
+ slowcase := false
|
||
+ for i = 0; i < len(line); i++ {
|
||
+ if line[i] == '\t' {
|
||
+ if prefix == i {
|
||
+ prefix++
|
||
+ } else {
|
||
+ slowcase = true
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ // no need to decode runes if all tabs are at the beginning of the line
|
||
+ if !slowcase {
|
||
+ for i = 0; i < prefix*tabSize; i++ {
|
||
+ out.WriteByte(' ')
|
||
+ }
|
||
+ out.Write(line[prefix:])
|
||
+ return
|
||
+ }
|
||
+
|
||
+ // the slow case: we need to count runes to figure out how
|
||
+ // many spaces to insert for each tab
|
||
+ column := 0
|
||
+ i = 0
|
||
+ for i < len(line) {
|
||
+ start := i
|
||
+ for i < len(line) && line[i] != '\t' {
|
||
+ _, size := utf8.DecodeRune(line[i:])
|
||
+ i += size
|
||
+ column++
|
||
+ }
|
||
+
|
||
+ if i > start {
|
||
+ out.Write(line[start:i])
|
||
+ }
|
||
+
|
||
+ if i >= len(line) {
|
||
+ break
|
||
+ }
|
||
+
|
||
+ for {
|
||
+ out.WriteByte(' ')
|
||
+ column++
|
||
+ if column%tabSize == 0 {
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+
|
||
+ i++
|
||
+ }
|
||
+}
|
||
+
|
||
+// Find if a line counts as indented or not.
|
||
+// Returns number of characters the indent is (0 = not indented).
|
||
+func isIndented(data []byte, indentSize int) int {
|
||
+ if len(data) == 0 {
|
||
+ return 0
|
||
+ }
|
||
+ if data[0] == '\t' {
|
||
+ return 1
|
||
+ }
|
||
+ if len(data) < indentSize {
|
||
+ return 0
|
||
+ }
|
||
+ for i := 0; i < indentSize; i++ {
|
||
+ if data[i] != ' ' {
|
||
+ return 0
|
||
+ }
|
||
+ }
|
||
+ return indentSize
|
||
+}
|
||
+
|
||
+// Create a url-safe slug for fragments
|
||
+func slugify(in []byte) []byte {
|
||
+ if len(in) == 0 {
|
||
+ return in
|
||
+ }
|
||
+ out := make([]byte, 0, len(in))
|
||
+ sym := false
|
||
+
|
||
+ for _, ch := range in {
|
||
+ if isalnum(ch) {
|
||
+ sym = false
|
||
+ out = append(out, ch)
|
||
+ } else if sym {
|
||
+ continue
|
||
+ } else {
|
||
+ out = append(out, '-')
|
||
+ sym = true
|
||
+ }
|
||
+ }
|
||
+ var a, b int
|
||
+ var ch byte
|
||
+ for a, ch = range out {
|
||
+ if ch != '-' {
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+ for b = len(out) - 1; b > 0; b-- {
|
||
+ if out[b] != '-' {
|
||
+ break
|
||
+ }
|
||
+ }
|
||
+ return out[a : b+1]
|
||
+}
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go
|
||
new file mode 100644
|
||
index 000000000000..04e6050ceeae
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/russross/blackfriday/v2/node.go
|
||
@@ -0,0 +1,360 @@
|
||
+package blackfriday
|
||
+
|
||
+import (
|
||
+ "bytes"
|
||
+ "fmt"
|
||
+)
|
||
+
|
||
+// NodeType specifies a type of a single node of a syntax tree. Usually one
|
||
+// node (and its type) corresponds to a single markdown feature, e.g. emphasis
|
||
+// or code block.
|
||
+type NodeType int
|
||
+
|
||
+// Constants for identifying different types of nodes. See NodeType.
|
||
+const (
|
||
+ Document NodeType = iota
|
||
+ BlockQuote
|
||
+ List
|
||
+ Item
|
||
+ Paragraph
|
||
+ Heading
|
||
+ HorizontalRule
|
||
+ Emph
|
||
+ Strong
|
||
+ Del
|
||
+ Link
|
||
+ Image
|
||
+ Text
|
||
+ HTMLBlock
|
||
+ CodeBlock
|
||
+ Softbreak
|
||
+ Hardbreak
|
||
+ Code
|
||
+ HTMLSpan
|
||
+ Table
|
||
+ TableCell
|
||
+ TableHead
|
||
+ TableBody
|
||
+ TableRow
|
||
+)
|
||
+
|
||
+var nodeTypeNames = []string{
|
||
+ Document: "Document",
|
||
+ BlockQuote: "BlockQuote",
|
||
+ List: "List",
|
||
+ Item: "Item",
|
||
+ Paragraph: "Paragraph",
|
||
+ Heading: "Heading",
|
||
+ HorizontalRule: "HorizontalRule",
|
||
+ Emph: "Emph",
|
||
+ Strong: "Strong",
|
||
+ Del: "Del",
|
||
+ Link: "Link",
|
||
+ Image: "Image",
|
||
+ Text: "Text",
|
||
+ HTMLBlock: "HTMLBlock",
|
||
+ CodeBlock: "CodeBlock",
|
||
+ Softbreak: "Softbreak",
|
||
+ Hardbreak: "Hardbreak",
|
||
+ Code: "Code",
|
||
+ HTMLSpan: "HTMLSpan",
|
||
+ Table: "Table",
|
||
+ TableCell: "TableCell",
|
||
+ TableHead: "TableHead",
|
||
+ TableBody: "TableBody",
|
||
+ TableRow: "TableRow",
|
||
+}
|
||
+
|
||
+func (t NodeType) String() string {
|
||
+ return nodeTypeNames[t]
|
||
+}
|
||
+
|
||
+// ListData contains fields relevant to a List and Item node type.
|
||
+type ListData struct {
|
||
+ ListFlags ListType
|
||
+ Tight bool // Skip <p>s around list item data if true
|
||
+ BulletChar byte // '*', '+' or '-' in bullet lists
|
||
+ Delimiter byte // '.' or ')' after the number in ordered lists
|
||
+ RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering
|
||
+ IsFootnotesList bool // This is a list of footnotes
|
||
+}
|
||
+
|
||
+// LinkData contains fields relevant to a Link node type.
|
||
+type LinkData struct {
|
||
+ Destination []byte // Destination is what goes into a href
|
||
+ Title []byte // Title is the tooltip thing that goes in a title attribute
|
||
+ NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote
|
||
+ Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil.
|
||
+}
|
||
+
|
||
+// CodeBlockData contains fields relevant to a CodeBlock node type.
|
||
+type CodeBlockData struct {
|
||
+ IsFenced bool // Specifies whether it's a fenced code block or an indented one
|
||
+ Info []byte // This holds the info string
|
||
+ FenceChar byte
|
||
+ FenceLength int
|
||
+ FenceOffset int
|
||
+}
|
||
+
|
||
+// TableCellData contains fields relevant to a TableCell node type.
|
||
+type TableCellData struct {
|
||
+ IsHeader bool // This tells if it's under the header row
|
||
+ Align CellAlignFlags // This holds the value for align attribute
|
||
+}
|
||
+
|
||
+// HeadingData contains fields relevant to a Heading node type.
|
||
+type HeadingData struct {
|
||
+ Level int // This holds the heading level number
|
||
+ HeadingID string // This might hold heading ID, if present
|
||
+ IsTitleblock bool // Specifies whether it's a title block
|
||
+}
|
||
+
|
||
+// Node is a single element in the abstract syntax tree of the parsed document.
|
||
+// It holds connections to the structurally neighboring nodes and, for certain
|
||
+// types of nodes, additional information that might be needed when rendering.
|
||
+type Node struct {
|
||
+ Type NodeType // Determines the type of the node
|
||
+ Parent *Node // Points to the parent
|
||
+ FirstChild *Node // Points to the first child, if any
|
||
+ LastChild *Node // Points to the last child, if any
|
||
+ Prev *Node // Previous sibling; nil if it's the first child
|
||
+ Next *Node // Next sibling; nil if it's the last child
|
||
+
|
||
+ Literal []byte // Text contents of the leaf nodes
|
||
+
|
||
+ HeadingData // Populated if Type is Heading
|
||
+ ListData // Populated if Type is List
|
||
+ CodeBlockData // Populated if Type is CodeBlock
|
||
+ LinkData // Populated if Type is Link
|
||
+ TableCellData // Populated if Type is TableCell
|
||
+
|
||
+ content []byte // Markdown content of the block nodes
|
||
+ open bool // Specifies an open block node that has not been finished to process yet
|
||
+}
|
||
+
|
||
+// NewNode allocates a node of a specified type.
|
||
+func NewNode(typ NodeType) *Node {
|
||
+ return &Node{
|
||
+ Type: typ,
|
||
+ open: true,
|
||
+ }
|
||
+}
|
||
+
|
||
+func (n *Node) String() string {
|
||
+ ellipsis := ""
|
||
+ snippet := n.Literal
|
||
+ if len(snippet) > 16 {
|
||
+ snippet = snippet[:16]
|
||
+ ellipsis = "..."
|
||
+ }
|
||
+ return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis)
|
||
+}
|
||
+
|
||
+// Unlink removes node 'n' from the tree.
|
||
+// It panics if the node is nil.
|
||
+func (n *Node) Unlink() {
|
||
+ if n.Prev != nil {
|
||
+ n.Prev.Next = n.Next
|
||
+ } else if n.Parent != nil {
|
||
+ n.Parent.FirstChild = n.Next
|
||
+ }
|
||
+ if n.Next != nil {
|
||
+ n.Next.Prev = n.Prev
|
||
+ } else if n.Parent != nil {
|
||
+ n.Parent.LastChild = n.Prev
|
||
+ }
|
||
+ n.Parent = nil
|
||
+ n.Next = nil
|
||
+ n.Prev = nil
|
||
+}
|
||
+
|
||
+// AppendChild adds a node 'child' as a child of 'n'.
|
||
+// It panics if either node is nil.
|
||
+func (n *Node) AppendChild(child *Node) {
|
||
+ child.Unlink()
|
||
+ child.Parent = n
|
||
+ if n.LastChild != nil {
|
||
+ n.LastChild.Next = child
|
||
+ child.Prev = n.LastChild
|
||
+ n.LastChild = child
|
||
+ } else {
|
||
+ n.FirstChild = child
|
||
+ n.LastChild = child
|
||
+ }
|
||
+}
|
||
+
|
||
+// InsertBefore inserts 'sibling' immediately before 'n'.
|
||
+// It panics if either node is nil.
|
||
+func (n *Node) InsertBefore(sibling *Node) {
|
||
+ sibling.Unlink()
|
||
+ sibling.Prev = n.Prev
|
||
+ if sibling.Prev != nil {
|
||
+ sibling.Prev.Next = sibling
|
||
+ }
|
||
+ sibling.Next = n
|
||
+ n.Prev = sibling
|
||
+ sibling.Parent = n.Parent
|
||
+ if sibling.Prev == nil {
|
||
+ sibling.Parent.FirstChild = sibling
|
||
+ }
|
||
+}
|
||
+
|
||
+// IsContainer returns true if 'n' can contain children.
|
||
+func (n *Node) IsContainer() bool {
|
||
+ switch n.Type {
|
||
+ case Document:
|
||
+ fallthrough
|
||
+ case BlockQuote:
|
||
+ fallthrough
|
||
+ case List:
|
||
+ fallthrough
|
||
+ case Item:
|
||
+ fallthrough
|
||
+ case Paragraph:
|
||
+ fallthrough
|
||
+ case Heading:
|
||
+ fallthrough
|
||
+ case Emph:
|
||
+ fallthrough
|
||
+ case Strong:
|
||
+ fallthrough
|
||
+ case Del:
|
||
+ fallthrough
|
||
+ case Link:
|
||
+ fallthrough
|
||
+ case Image:
|
||
+ fallthrough
|
||
+ case Table:
|
||
+ fallthrough
|
||
+ case TableHead:
|
||
+ fallthrough
|
||
+ case TableBody:
|
||
+ fallthrough
|
||
+ case TableRow:
|
||
+ fallthrough
|
||
+ case TableCell:
|
||
+ return true
|
||
+ default:
|
||
+ return false
|
||
+ }
|
||
+}
|
||
+
|
||
+// IsLeaf returns true if 'n' is a leaf node.
|
||
+func (n *Node) IsLeaf() bool {
|
||
+ return !n.IsContainer()
|
||
+}
|
||
+
|
||
+func (n *Node) canContain(t NodeType) bool {
|
||
+ if n.Type == List {
|
||
+ return t == Item
|
||
+ }
|
||
+ if n.Type == Document || n.Type == BlockQuote || n.Type == Item {
|
||
+ return t != Item
|
||
+ }
|
||
+ if n.Type == Table {
|
||
+ return t == TableHead || t == TableBody
|
||
+ }
|
||
+ if n.Type == TableHead || n.Type == TableBody {
|
||
+ return t == TableRow
|
||
+ }
|
||
+ if n.Type == TableRow {
|
||
+ return t == TableCell
|
||
+ }
|
||
+ return false
|
||
+}
|
||
+
|
||
+// WalkStatus allows NodeVisitor to have some control over the tree traversal.
|
||
+// It is returned from NodeVisitor and different values allow Node.Walk to
|
||
+// decide which node to go to next.
|
||
+type WalkStatus int
|
||
+
|
||
+const (
|
||
+ // GoToNext is the default traversal of every node.
|
||
+ GoToNext WalkStatus = iota
|
||
+ // SkipChildren tells walker to skip all children of current node.
|
||
+ SkipChildren
|
||
+ // Terminate tells walker to terminate the traversal.
|
||
+ Terminate
|
||
+)
|
||
+
|
||
+// NodeVisitor is a callback to be called when traversing the syntax tree.
|
||
+// Called twice for every node: once with entering=true when the branch is
|
||
+// first visited, then with entering=false after all the children are done.
|
||
+type NodeVisitor func(node *Node, entering bool) WalkStatus
|
||
+
|
||
+// Walk is a convenience method that instantiates a walker and starts a
|
||
+// traversal of subtree rooted at n.
|
||
+func (n *Node) Walk(visitor NodeVisitor) {
|
||
+ w := newNodeWalker(n)
|
||
+ for w.current != nil {
|
||
+ status := visitor(w.current, w.entering)
|
||
+ switch status {
|
||
+ case GoToNext:
|
||
+ w.next()
|
||
+ case SkipChildren:
|
||
+ w.entering = false
|
||
+ w.next()
|
||
+ case Terminate:
|
||
+ return
|
||
+ }
|
||
+ }
|
||
+}
|
||
+
|
||
+type nodeWalker struct {
|
||
+ current *Node
|
||
+ root *Node
|
||
+ entering bool
|
||
+}
|
||
+
|
||
+func newNodeWalker(root *Node) *nodeWalker {
|
||
+ return &nodeWalker{
|
||
+ current: root,
|
||
+ root: root,
|
||
+ entering: true,
|
||
+ }
|
||
+}
|
||
+
|
||
+func (nw *nodeWalker) next() {
|
||
+ if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root {
|
||
+ nw.current = nil
|
||
+ return
|
||
+ }
|
||
+ if nw.entering && nw.current.IsContainer() {
|
||
+ if nw.current.FirstChild != nil {
|
||
+ nw.current = nw.current.FirstChild
|
||
+ nw.entering = true
|
||
+ } else {
|
||
+ nw.entering = false
|
||
+ }
|
||
+ } else if nw.current.Next == nil {
|
||
+ nw.current = nw.current.Parent
|
||
+ nw.entering = false
|
||
+ } else {
|
||
+ nw.current = nw.current.Next
|
||
+ nw.entering = true
|
||
+ }
|
||
+}
|
||
+
|
||
+func dump(ast *Node) {
|
||
+ fmt.Println(dumpString(ast))
|
||
+}
|
||
+
|
||
+func dumpR(ast *Node, depth int) string {
|
||
+ if ast == nil {
|
||
+ return ""
|
||
+ }
|
||
+ indent := bytes.Repeat([]byte("\t"), depth)
|
||
+ content := ast.Literal
|
||
+ if content == nil {
|
||
+ content = ast.content
|
||
+ }
|
||
+ result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content)
|
||
+ for n := ast.FirstChild; n != nil; n = n.Next {
|
||
+ result += dumpR(n, depth+1)
|
||
+ }
|
||
+ return result
|
||
+}
|
||
+
|
||
+func dumpString(ast *Node) string {
|
||
+ return dumpR(ast, 0)
|
||
+}
|
||
diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go
|
||
new file mode 100644
|
||
index 000000000000..3a220e94247d
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/russross/blackfriday/v2/smartypants.go
|
||
@@ -0,0 +1,457 @@
|
||
+//
|
||
+// Blackfriday Markdown Processor
|
||
+// Available at http://github.com/russross/blackfriday
|
||
+//
|
||
+// Copyright © 2011 Russ Ross <russ@russross.com>.
|
||
+// Distributed under the Simplified BSD License.
|
||
+// See README.md for details.
|
||
+//
|
||
+
|
||
+//
|
||
+//
|
||
+// SmartyPants rendering
|
||
+//
|
||
+//
|
||
+
|
||
+package blackfriday
|
||
+
|
||
+import (
|
||
+ "bytes"
|
||
+ "io"
|
||
+)
|
||
+
|
||
+// SPRenderer is a struct containing state of a Smartypants renderer.
|
||
+type SPRenderer struct {
|
||
+ inSingleQuote bool
|
||
+ inDoubleQuote bool
|
||
+ callbacks [256]smartCallback
|
||
+}
|
||
+
|
||
+func wordBoundary(c byte) bool {
|
||
+ return c == 0 || isspace(c) || ispunct(c)
|
||
+}
|
||
+
|
||
+func tolower(c byte) byte {
|
||
+ if c >= 'A' && c <= 'Z' {
|
||
+ return c - 'A' + 'a'
|
||
+ }
|
||
+ return c
|
||
+}
|
||
+
|
||
+func isdigit(c byte) bool {
|
||
+ return c >= '0' && c <= '9'
|
||
+}
|
||
+
|
||
+func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool {
|
||
+ // edge of the buffer is likely to be a tag that we don't get to see,
|
||
+ // so we treat it like text sometimes
|
||
+
|
||
+ // enumerate all sixteen possibilities for (previousChar, nextChar)
|
||
+ // each can be one of {0, space, punct, other}
|
||
+ switch {
|
||
+ case previousChar == 0 && nextChar == 0:
|
||
+ // context is not any help here, so toggle
|
||
+ *isOpen = !*isOpen
|
||
+ case isspace(previousChar) && nextChar == 0:
|
||
+ // [ "] might be [ "<code>foo...]
|
||
+ *isOpen = true
|
||
+ case ispunct(previousChar) && nextChar == 0:
|
||
+ // [!"] hmm... could be [Run!"] or [("<code>...]
|
||
+ *isOpen = false
|
||
+ case /* isnormal(previousChar) && */ nextChar == 0:
|
||
+ // [a"] is probably a close
|
||
+ *isOpen = false
|
||
+ case previousChar == 0 && isspace(nextChar):
|
||
+ // [" ] might be [...foo</code>" ]
|
||
+ *isOpen = false
|
||
+ case isspace(previousChar) && isspace(nextChar):
|
||
+ // [ " ] context is not any help here, so toggle
|
||
+ *isOpen = !*isOpen
|
||
+ case ispunct(previousChar) && isspace(nextChar):
|
||
+ // [!" ] is probably a close
|
||
+ *isOpen = false
|
||
+ case /* isnormal(previousChar) && */ isspace(nextChar):
|
||
+ // [a" ] this is one of the easy cases
|
||
+ *isOpen = false
|
||
+ case previousChar == 0 && ispunct(nextChar):
|
||
+ // ["!] hmm... could be ["$1.95] or [</code>"!...]
|
||
+ *isOpen = false
|
||
+ case isspace(previousChar) && ispunct(nextChar):
|
||
+ // [ "!] looks more like [ "$1.95]
|
||
+ *isOpen = true
|
||
+ case ispunct(previousChar) && ispunct(nextChar):
|
||
+ // [!"!] context is not any help here, so toggle
|
||
+ *isOpen = !*isOpen
|
||
+ case /* isnormal(previousChar) && */ ispunct(nextChar):
|
||
+ // [a"!] is probably a close
|
||
+ *isOpen = false
|
||
+ case previousChar == 0 /* && isnormal(nextChar) */ :
|
||
+ // ["a] is probably an open
|
||
+ *isOpen = true
|
||
+ case isspace(previousChar) /* && isnormal(nextChar) */ :
|
||
+ // [ "a] this is one of the easy cases
|
||
+ *isOpen = true
|
||
+ case ispunct(previousChar) /* && isnormal(nextChar) */ :
|
||
+ // [!"a] is probably an open
|
||
+ *isOpen = true
|
||
+ default:
|
||
+ // [a'b] maybe a contraction?
|
||
+ *isOpen = false
|
||
+ }
|
||
+
|
||
+ // Note that with the limited lookahead, this non-breaking
|
||
+ // space will also be appended to single double quotes.
|
||
+ if addNBSP && !*isOpen {
|
||
+ out.WriteString(" ")
|
||
+ }
|
||
+
|
||
+ out.WriteByte('&')
|
||
+ if *isOpen {
|
||
+ out.WriteByte('l')
|
||
+ } else {
|
||
+ out.WriteByte('r')
|
||
+ }
|
||
+ out.WriteByte(quote)
|
||
+ out.WriteString("quo;")
|
||
+
|
||
+ if addNBSP && *isOpen {
|
||
+ out.WriteString(" ")
|
||
+ }
|
||
+
|
||
+ return true
|
||
+}
|
||
+
|
||
+func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||
+ if len(text) >= 2 {
|
||
+ t1 := tolower(text[1])
|
||
+
|
||
+ if t1 == '\'' {
|
||
+ nextChar := byte(0)
|
||
+ if len(text) >= 3 {
|
||
+ nextChar = text[2]
|
||
+ }
|
||
+ if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
|
||
+ return 1
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
|
||
+ out.WriteString("’")
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ if len(text) >= 3 {
|
||
+ t2 := tolower(text[2])
|
||
+
|
||
+ if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
|
||
+ (len(text) < 4 || wordBoundary(text[3])) {
|
||
+ out.WriteString("’")
|
||
+ return 0
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ nextChar := byte(0)
|
||
+ if len(text) > 1 {
|
||
+ nextChar = text[1]
|
||
+ }
|
||
+ if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) {
|
||
+ return 0
|
||
+ }
|
||
+
|
||
+ out.WriteByte(text[0])
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||
+ if len(text) >= 3 {
|
||
+ t1 := tolower(text[1])
|
||
+ t2 := tolower(text[2])
|
||
+
|
||
+ if t1 == 'c' && t2 == ')' {
|
||
+ out.WriteString("©")
|
||
+ return 2
|
||
+ }
|
||
+
|
||
+ if t1 == 'r' && t2 == ')' {
|
||
+ out.WriteString("®")
|
||
+ return 2
|
||
+ }
|
||
+
|
||
+ if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
|
||
+ out.WriteString("™")
|
||
+ return 3
|
||
+ }
|
||
+ }
|
||
+
|
||
+ out.WriteByte(text[0])
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||
+ if len(text) >= 2 {
|
||
+ if text[1] == '-' {
|
||
+ out.WriteString("—")
|
||
+ return 1
|
||
+ }
|
||
+
|
||
+ if wordBoundary(previousChar) && wordBoundary(text[1]) {
|
||
+ out.WriteString("–")
|
||
+ return 0
|
||
+ }
|
||
+ }
|
||
+
|
||
+ out.WriteByte(text[0])
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||
+ if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
|
||
+ out.WriteString("—")
|
||
+ return 2
|
||
+ }
|
||
+ if len(text) >= 2 && text[1] == '-' {
|
||
+ out.WriteString("–")
|
||
+ return 1
|
||
+ }
|
||
+
|
||
+ out.WriteByte(text[0])
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int {
|
||
+ if bytes.HasPrefix(text, []byte(""")) {
|
||
+ nextChar := byte(0)
|
||
+ if len(text) >= 7 {
|
||
+ nextChar = text[6]
|
||
+ }
|
||
+ if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) {
|
||
+ return 5
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if bytes.HasPrefix(text, []byte("�")) {
|
||
+ return 3
|
||
+ }
|
||
+
|
||
+ out.WriteByte('&')
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int {
|
||
+ var quote byte = 'd'
|
||
+ if angledQuotes {
|
||
+ quote = 'a'
|
||
+ }
|
||
+
|
||
+ return func(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||
+ return r.smartAmpVariant(out, previousChar, text, quote, addNBSP)
|
||
+ }
|
||
+}
|
||
+
|
||
+func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||
+ if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
|
||
+ out.WriteString("…")
|
||
+ return 2
|
||
+ }
|
||
+
|
||
+ if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
|
||
+ out.WriteString("…")
|
||
+ return 4
|
||
+ }
|
||
+
|
||
+ out.WriteByte(text[0])
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||
+ if len(text) >= 2 && text[1] == '`' {
|
||
+ nextChar := byte(0)
|
||
+ if len(text) >= 3 {
|
||
+ nextChar = text[2]
|
||
+ }
|
||
+ if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
|
||
+ return 1
|
||
+ }
|
||
+ }
|
||
+
|
||
+ out.WriteByte(text[0])
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||
+ if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
|
||
+ // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
|
||
+ // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
|
||
+ // and avoid changing dates like 1/23/2005 into fractions.
|
||
+ numEnd := 0
|
||
+ for len(text) > numEnd && isdigit(text[numEnd]) {
|
||
+ numEnd++
|
||
+ }
|
||
+ if numEnd == 0 {
|
||
+ out.WriteByte(text[0])
|
||
+ return 0
|
||
+ }
|
||
+ denStart := numEnd + 1
|
||
+ if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
|
||
+ denStart = numEnd + 3
|
||
+ } else if len(text) < numEnd+2 || text[numEnd] != '/' {
|
||
+ out.WriteByte(text[0])
|
||
+ return 0
|
||
+ }
|
||
+ denEnd := denStart
|
||
+ for len(text) > denEnd && isdigit(text[denEnd]) {
|
||
+ denEnd++
|
||
+ }
|
||
+ if denEnd == denStart {
|
||
+ out.WriteByte(text[0])
|
||
+ return 0
|
||
+ }
|
||
+ if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
|
||
+ out.WriteString("<sup>")
|
||
+ out.Write(text[:numEnd])
|
||
+ out.WriteString("</sup>⁄<sub>")
|
||
+ out.Write(text[denStart:denEnd])
|
||
+ out.WriteString("</sub>")
|
||
+ return denEnd - 1
|
||
+ }
|
||
+ }
|
||
+
|
||
+ out.WriteByte(text[0])
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||
+ if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
|
||
+ if text[0] == '1' && text[1] == '/' && text[2] == '2' {
|
||
+ if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
|
||
+ out.WriteString("½")
|
||
+ return 2
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if text[0] == '1' && text[1] == '/' && text[2] == '4' {
|
||
+ if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
|
||
+ out.WriteString("¼")
|
||
+ return 2
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if text[0] == '3' && text[1] == '/' && text[2] == '4' {
|
||
+ if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
|
||
+ out.WriteString("¾")
|
||
+ return 2
|
||
+ }
|
||
+ }
|
||
+ }
|
||
+
|
||
+ out.WriteByte(text[0])
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int {
|
||
+ nextChar := byte(0)
|
||
+ if len(text) > 1 {
|
||
+ nextChar = text[1]
|
||
+ }
|
||
+ if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) {
|
||
+ out.WriteString(""")
|
||
+ }
|
||
+
|
||
+ return 0
|
||
+}
|
||
+
|
||
+func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||
+ return r.smartDoubleQuoteVariant(out, previousChar, text, 'd')
|
||
+}
|
||
+
|
||
+func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||
+ return r.smartDoubleQuoteVariant(out, previousChar, text, 'a')
|
||
+}
|
||
+
|
||
+func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int {
|
||
+ i := 0
|
||
+
|
||
+ for i < len(text) && text[i] != '>' {
|
||
+ i++
|
||
+ }
|
||
+
|
||
+ out.Write(text[:i+1])
|
||
+ return i
|
||
+}
|
||
+
|
||
+type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int
|
||
+
|
||
+// NewSmartypantsRenderer constructs a Smartypants renderer object.
|
||
+func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer {
|
||
+ var (
|
||
+ r SPRenderer
|
||
+
|
||
+ smartAmpAngled = r.smartAmp(true, false)
|
||
+ smartAmpAngledNBSP = r.smartAmp(true, true)
|
||
+ smartAmpRegular = r.smartAmp(false, false)
|
||
+ smartAmpRegularNBSP = r.smartAmp(false, true)
|
||
+
|
||
+ addNBSP = flags&SmartypantsQuotesNBSP != 0
|
||
+ )
|
||
+
|
||
+ if flags&SmartypantsAngledQuotes == 0 {
|
||
+ r.callbacks['"'] = r.smartDoubleQuote
|
||
+ if !addNBSP {
|
||
+ r.callbacks['&'] = smartAmpRegular
|
||
+ } else {
|
||
+ r.callbacks['&'] = smartAmpRegularNBSP
|
||
+ }
|
||
+ } else {
|
||
+ r.callbacks['"'] = r.smartAngledDoubleQuote
|
||
+ if !addNBSP {
|
||
+ r.callbacks['&'] = smartAmpAngled
|
||
+ } else {
|
||
+ r.callbacks['&'] = smartAmpAngledNBSP
|
||
+ }
|
||
+ }
|
||
+ r.callbacks['\''] = r.smartSingleQuote
|
||
+ r.callbacks['('] = r.smartParens
|
||
+ if flags&SmartypantsDashes != 0 {
|
||
+ if flags&SmartypantsLatexDashes == 0 {
|
||
+ r.callbacks['-'] = r.smartDash
|
||
+ } else {
|
||
+ r.callbacks['-'] = r.smartDashLatex
|
||
+ }
|
||
+ }
|
||
+ r.callbacks['.'] = r.smartPeriod
|
||
+ if flags&SmartypantsFractions == 0 {
|
||
+ r.callbacks['1'] = r.smartNumber
|
||
+ r.callbacks['3'] = r.smartNumber
|
||
+ } else {
|
||
+ for ch := '1'; ch <= '9'; ch++ {
|
||
+ r.callbacks[ch] = r.smartNumberGeneric
|
||
+ }
|
||
+ }
|
||
+ r.callbacks['<'] = r.smartLeftAngle
|
||
+ r.callbacks['`'] = r.smartBacktick
|
||
+ return &r
|
||
+}
|
||
+
|
||
+// Process is the entry point of the Smartypants renderer.
|
||
+func (r *SPRenderer) Process(w io.Writer, text []byte) {
|
||
+ mark := 0
|
||
+ for i := 0; i < len(text); i++ {
|
||
+ if action := r.callbacks[text[i]]; action != nil {
|
||
+ if i > mark {
|
||
+ w.Write(text[mark:i])
|
||
+ }
|
||
+ previousChar := byte(0)
|
||
+ if i > 0 {
|
||
+ previousChar = text[i-1]
|
||
+ }
|
||
+ var tmp bytes.Buffer
|
||
+ i += action(&tmp, previousChar, text[i:])
|
||
+ w.Write(tmp.Bytes())
|
||
+ mark = i + 1
|
||
+ }
|
||
+ }
|
||
+ if mark < len(text) {
|
||
+ w.Write(text[mark:])
|
||
+ }
|
||
+}
|
||
diff --git a/vendor/github.com/spf13/cobra/doc/README.md b/vendor/github.com/spf13/cobra/doc/README.md
|
||
new file mode 100644
|
||
index 000000000000..8e07baae3307
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/spf13/cobra/doc/README.md
|
||
@@ -0,0 +1,17 @@
|
||
+# Documentation generation
|
||
+
|
||
+- [Man page docs](./man_docs.md)
|
||
+- [Markdown docs](./md_docs.md)
|
||
+- [Rest docs](./rest_docs.md)
|
||
+- [Yaml docs](./yaml_docs.md)
|
||
+
|
||
+## Options
|
||
+### `DisableAutoGenTag`
|
||
+
|
||
+You may set `cmd.DisableAutoGenTag = true`
|
||
+to _entirely_ remove the auto generated string "Auto generated by spf13/cobra..."
|
||
+from any documentation source.
|
||
+
|
||
+### `InitDefaultCompletionCmd`
|
||
+
|
||
+You may call `cmd.InitDefaultCompletionCmd()` to document the default autocompletion command.
|
||
diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.go b/vendor/github.com/spf13/cobra/doc/man_docs.go
|
||
new file mode 100644
|
||
index 000000000000..b8c15ce88543
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/spf13/cobra/doc/man_docs.go
|
||
@@ -0,0 +1,246 @@
|
||
+// Copyright 2013-2023 The Cobra Authors
|
||
+//
|
||
+// Licensed under the Apache License, Version 2.0 (the "License");
|
||
+// you may not use this file except in compliance with the License.
|
||
+// You may obtain a copy of the License at
|
||
+//
|
||
+// http://www.apache.org/licenses/LICENSE-2.0
|
||
+//
|
||
+// Unless required by applicable law or agreed to in writing, software
|
||
+// distributed under the License is distributed on an "AS IS" BASIS,
|
||
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+// See the License for the specific language governing permissions and
|
||
+// limitations under the License.
|
||
+
|
||
+package doc
|
||
+
|
||
+import (
|
||
+ "bytes"
|
||
+ "fmt"
|
||
+ "io"
|
||
+ "os"
|
||
+ "path/filepath"
|
||
+ "sort"
|
||
+ "strconv"
|
||
+ "strings"
|
||
+ "time"
|
||
+
|
||
+ "github.com/cpuguy83/go-md2man/v2/md2man"
|
||
+ "github.com/spf13/cobra"
|
||
+ "github.com/spf13/pflag"
|
||
+)
|
||
+
|
||
+// GenManTree will generate a man page for this command and all descendants
|
||
+// in the directory given. The header may be nil. This function may not work
|
||
+// correctly if your command names have `-` in them. If you have `cmd` with two
|
||
+// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third`
|
||
+// it is undefined which help output will be in the file `cmd-sub-third.1`.
|
||
+func GenManTree(cmd *cobra.Command, header *GenManHeader, dir string) error {
|
||
+ return GenManTreeFromOpts(cmd, GenManTreeOptions{
|
||
+ Header: header,
|
||
+ Path: dir,
|
||
+ CommandSeparator: "-",
|
||
+ })
|
||
+}
|
||
+
|
||
+// GenManTreeFromOpts generates a man page for the command and all descendants.
|
||
+// The pages are written to the opts.Path directory.
|
||
+func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error {
|
||
+ header := opts.Header
|
||
+ if header == nil {
|
||
+ header = &GenManHeader{}
|
||
+ }
|
||
+ for _, c := range cmd.Commands() {
|
||
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
|
||
+ continue
|
||
+ }
|
||
+ if err := GenManTreeFromOpts(c, opts); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ }
|
||
+ section := "1"
|
||
+ if header.Section != "" {
|
||
+ section = header.Section
|
||
+ }
|
||
+
|
||
+ separator := "_"
|
||
+ if opts.CommandSeparator != "" {
|
||
+ separator = opts.CommandSeparator
|
||
+ }
|
||
+ basename := strings.ReplaceAll(cmd.CommandPath(), " ", separator)
|
||
+ filename := filepath.Join(opts.Path, basename+"."+section)
|
||
+ f, err := os.Create(filename)
|
||
+ if err != nil {
|
||
+ return err
|
||
+ }
|
||
+ defer f.Close()
|
||
+
|
||
+ headerCopy := *header
|
||
+ return GenMan(cmd, &headerCopy, f)
|
||
+}
|
||
+
|
||
+// GenManTreeOptions is the options for generating the man pages.
|
||
+// Used only in GenManTreeFromOpts.
|
||
+type GenManTreeOptions struct {
|
||
+ Header *GenManHeader
|
||
+ Path string
|
||
+ CommandSeparator string
|
||
+}
|
||
+
|
||
+// GenManHeader is a lot like the .TH header at the start of man pages. These
|
||
+// include the title, section, date, source, and manual. We will use the
|
||
+// current time if Date is unset and will use "Auto generated by spf13/cobra"
|
||
+// if the Source is unset.
|
||
+type GenManHeader struct {
|
||
+ Title string
|
||
+ Section string
|
||
+ Date *time.Time
|
||
+ date string
|
||
+ Source string
|
||
+ Manual string
|
||
+}
|
||
+
|
||
+// GenMan will generate a man page for the given command and write it to
|
||
+// w. The header argument may be nil, however obviously w may not.
|
||
+func GenMan(cmd *cobra.Command, header *GenManHeader, w io.Writer) error {
|
||
+ if header == nil {
|
||
+ header = &GenManHeader{}
|
||
+ }
|
||
+ if err := fillHeader(header, cmd.CommandPath(), cmd.DisableAutoGenTag); err != nil {
|
||
+ return err
|
||
+ }
|
||
+
|
||
+ b := genMan(cmd, header)
|
||
+ _, err := w.Write(md2man.Render(b))
|
||
+ return err
|
||
+}
|
||
+
|
||
+func fillHeader(header *GenManHeader, name string, disableAutoGen bool) error {
|
||
+ if header.Title == "" {
|
||
+ header.Title = strings.ToUpper(strings.ReplaceAll(name, " ", "\\-"))
|
||
+ }
|
||
+ if header.Section == "" {
|
||
+ header.Section = "1"
|
||
+ }
|
||
+ if header.Date == nil {
|
||
+ now := time.Now()
|
||
+ if epoch := os.Getenv("SOURCE_DATE_EPOCH"); epoch != "" {
|
||
+ unixEpoch, err := strconv.ParseInt(epoch, 10, 64)
|
||
+ if err != nil {
|
||
+ return fmt.Errorf("invalid SOURCE_DATE_EPOCH: %v", err)
|
||
+ }
|
||
+ now = time.Unix(unixEpoch, 0)
|
||
+ }
|
||
+ header.Date = &now
|
||
+ }
|
||
+ header.date = (*header.Date).Format("Jan 2006")
|
||
+ if header.Source == "" && !disableAutoGen {
|
||
+ header.Source = "Auto generated by spf13/cobra"
|
||
+ }
|
||
+ return nil
|
||
+}
|
||
+
|
||
+func manPreamble(buf io.StringWriter, header *GenManHeader, cmd *cobra.Command, dashedName string) {
|
||
+ description := cmd.Long
|
||
+ if len(description) == 0 {
|
||
+ description = cmd.Short
|
||
+ }
|
||
+
|
||
+ cobra.WriteStringAndCheck(buf, fmt.Sprintf(`%% "%s" "%s" "%s" "%s" "%s"
|
||
+# NAME
|
||
+`, header.Title, header.Section, header.date, header.Source, header.Manual))
|
||
+ cobra.WriteStringAndCheck(buf, fmt.Sprintf("%s \\- %s\n\n", dashedName, cmd.Short))
|
||
+ cobra.WriteStringAndCheck(buf, "# SYNOPSIS\n")
|
||
+ cobra.WriteStringAndCheck(buf, fmt.Sprintf("**%s**\n\n", cmd.UseLine()))
|
||
+ cobra.WriteStringAndCheck(buf, "# DESCRIPTION\n")
|
||
+ cobra.WriteStringAndCheck(buf, description+"\n\n")
|
||
+}
|
||
+
|
||
+func manPrintFlags(buf io.StringWriter, flags *pflag.FlagSet) {
|
||
+ flags.VisitAll(func(flag *pflag.Flag) {
|
||
+ if len(flag.Deprecated) > 0 || flag.Hidden {
|
||
+ return
|
||
+ }
|
||
+ format := ""
|
||
+ if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
|
||
+ format = fmt.Sprintf("**-%s**, **--%s**", flag.Shorthand, flag.Name)
|
||
+ } else {
|
||
+ format = fmt.Sprintf("**--%s**", flag.Name)
|
||
+ }
|
||
+ if len(flag.NoOptDefVal) > 0 {
|
||
+ format += "["
|
||
+ }
|
||
+ if flag.Value.Type() == "string" {
|
||
+ // put quotes on the value
|
||
+ format += "=%q"
|
||
+ } else {
|
||
+ format += "=%s"
|
||
+ }
|
||
+ if len(flag.NoOptDefVal) > 0 {
|
||
+ format += "]"
|
||
+ }
|
||
+ format += "\n\t%s\n\n"
|
||
+ cobra.WriteStringAndCheck(buf, fmt.Sprintf(format, flag.DefValue, flag.Usage))
|
||
+ })
|
||
+}
|
||
+
|
||
+func manPrintOptions(buf io.StringWriter, command *cobra.Command) {
|
||
+ flags := command.NonInheritedFlags()
|
||
+ if flags.HasAvailableFlags() {
|
||
+ cobra.WriteStringAndCheck(buf, "# OPTIONS\n")
|
||
+ manPrintFlags(buf, flags)
|
||
+ cobra.WriteStringAndCheck(buf, "\n")
|
||
+ }
|
||
+ flags = command.InheritedFlags()
|
||
+ if flags.HasAvailableFlags() {
|
||
+ cobra.WriteStringAndCheck(buf, "# OPTIONS INHERITED FROM PARENT COMMANDS\n")
|
||
+ manPrintFlags(buf, flags)
|
||
+ cobra.WriteStringAndCheck(buf, "\n")
|
||
+ }
|
||
+}
|
||
+
|
||
+func genMan(cmd *cobra.Command, header *GenManHeader) []byte {
|
||
+ cmd.InitDefaultHelpCmd()
|
||
+ cmd.InitDefaultHelpFlag()
|
||
+
|
||
+ // something like `rootcmd-subcmd1-subcmd2`
|
||
+ dashCommandName := strings.ReplaceAll(cmd.CommandPath(), " ", "-")
|
||
+
|
||
+ buf := new(bytes.Buffer)
|
||
+
|
||
+ manPreamble(buf, header, cmd, dashCommandName)
|
||
+ manPrintOptions(buf, cmd)
|
||
+ if len(cmd.Example) > 0 {
|
||
+ buf.WriteString("# EXAMPLE\n")
|
||
+ buf.WriteString(fmt.Sprintf("```\n%s\n```\n", cmd.Example))
|
||
+ }
|
||
+ if hasSeeAlso(cmd) {
|
||
+ buf.WriteString("# SEE ALSO\n")
|
||
+ seealsos := make([]string, 0)
|
||
+ if cmd.HasParent() {
|
||
+ parentPath := cmd.Parent().CommandPath()
|
||
+ dashParentPath := strings.ReplaceAll(parentPath, " ", "-")
|
||
+ seealso := fmt.Sprintf("**%s(%s)**", dashParentPath, header.Section)
|
||
+ seealsos = append(seealsos, seealso)
|
||
+ cmd.VisitParents(func(c *cobra.Command) {
|
||
+ if c.DisableAutoGenTag {
|
||
+ cmd.DisableAutoGenTag = c.DisableAutoGenTag
|
||
+ }
|
||
+ })
|
||
+ }
|
||
+ children := cmd.Commands()
|
||
+ sort.Sort(byName(children))
|
||
+ for _, c := range children {
|
||
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
|
||
+ continue
|
||
+ }
|
||
+ seealso := fmt.Sprintf("**%s-%s(%s)**", dashCommandName, c.Name(), header.Section)
|
||
+ seealsos = append(seealsos, seealso)
|
||
+ }
|
||
+ buf.WriteString(strings.Join(seealsos, ", ") + "\n")
|
||
+ }
|
||
+ if !cmd.DisableAutoGenTag {
|
||
+ buf.WriteString(fmt.Sprintf("# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006")))
|
||
+ }
|
||
+ return buf.Bytes()
|
||
+}
|
||
diff --git a/vendor/github.com/spf13/cobra/doc/man_docs.md b/vendor/github.com/spf13/cobra/doc/man_docs.md
|
||
new file mode 100644
|
||
index 000000000000..3709160f34f0
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/spf13/cobra/doc/man_docs.md
|
||
@@ -0,0 +1,31 @@
|
||
+# Generating Man Pages For Your Own cobra.Command
|
||
+
|
||
+Generating man pages from a cobra command is incredibly easy. An example is as follows:
|
||
+
|
||
+```go
|
||
+package main
|
||
+
|
||
+import (
|
||
+ "log"
|
||
+
|
||
+ "github.com/spf13/cobra"
|
||
+ "github.com/spf13/cobra/doc"
|
||
+)
|
||
+
|
||
+func main() {
|
||
+ cmd := &cobra.Command{
|
||
+ Use: "test",
|
||
+ Short: "my test program",
|
||
+ }
|
||
+ header := &doc.GenManHeader{
|
||
+ Title: "MINE",
|
||
+ Section: "3",
|
||
+ }
|
||
+ err := doc.GenManTree(cmd, header, "/tmp")
|
||
+ if err != nil {
|
||
+ log.Fatal(err)
|
||
+ }
|
||
+}
|
||
+```
|
||
+
|
||
+That will get you a man page `/tmp/test.3`
|
||
diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.go b/vendor/github.com/spf13/cobra/doc/md_docs.go
|
||
new file mode 100644
|
||
index 000000000000..c4a27c00935b
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/spf13/cobra/doc/md_docs.go
|
||
@@ -0,0 +1,156 @@
|
||
+// Copyright 2013-2023 The Cobra Authors
|
||
+//
|
||
+// Licensed under the Apache License, Version 2.0 (the "License");
|
||
+// you may not use this file except in compliance with the License.
|
||
+// You may obtain a copy of the License at
|
||
+//
|
||
+// http://www.apache.org/licenses/LICENSE-2.0
|
||
+//
|
||
+// Unless required by applicable law or agreed to in writing, software
|
||
+// distributed under the License is distributed on an "AS IS" BASIS,
|
||
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+// See the License for the specific language governing permissions and
|
||
+// limitations under the License.
|
||
+
|
||
+package doc
|
||
+
|
||
+import (
|
||
+ "bytes"
|
||
+ "fmt"
|
||
+ "io"
|
||
+ "os"
|
||
+ "path/filepath"
|
||
+ "sort"
|
||
+ "strings"
|
||
+ "time"
|
||
+
|
||
+ "github.com/spf13/cobra"
|
||
+)
|
||
+
|
||
+func printOptions(buf *bytes.Buffer, cmd *cobra.Command, name string) error {
|
||
+ flags := cmd.NonInheritedFlags()
|
||
+ flags.SetOutput(buf)
|
||
+ if flags.HasAvailableFlags() {
|
||
+ buf.WriteString("### Options\n\n```\n")
|
||
+ flags.PrintDefaults()
|
||
+ buf.WriteString("```\n\n")
|
||
+ }
|
||
+
|
||
+ parentFlags := cmd.InheritedFlags()
|
||
+ parentFlags.SetOutput(buf)
|
||
+ if parentFlags.HasAvailableFlags() {
|
||
+ buf.WriteString("### Options inherited from parent commands\n\n```\n")
|
||
+ parentFlags.PrintDefaults()
|
||
+ buf.WriteString("```\n\n")
|
||
+ }
|
||
+ return nil
|
||
+}
|
||
+
|
||
+// GenMarkdown creates markdown output.
|
||
+func GenMarkdown(cmd *cobra.Command, w io.Writer) error {
|
||
+ return GenMarkdownCustom(cmd, w, func(s string) string { return s })
|
||
+}
|
||
+
|
||
+// GenMarkdownCustom creates custom markdown output.
|
||
+func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error {
|
||
+ cmd.InitDefaultHelpCmd()
|
||
+ cmd.InitDefaultHelpFlag()
|
||
+
|
||
+ buf := new(bytes.Buffer)
|
||
+ name := cmd.CommandPath()
|
||
+
|
||
+ buf.WriteString("## " + name + "\n\n")
|
||
+ buf.WriteString(cmd.Short + "\n\n")
|
||
+ if len(cmd.Long) > 0 {
|
||
+ buf.WriteString("### Synopsis\n\n")
|
||
+ buf.WriteString(cmd.Long + "\n\n")
|
||
+ }
|
||
+
|
||
+ if cmd.Runnable() {
|
||
+ buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.UseLine()))
|
||
+ }
|
||
+
|
||
+ if len(cmd.Example) > 0 {
|
||
+ buf.WriteString("### Examples\n\n")
|
||
+ buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example))
|
||
+ }
|
||
+
|
||
+ if err := printOptions(buf, cmd, name); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ if hasSeeAlso(cmd) {
|
||
+ buf.WriteString("### SEE ALSO\n\n")
|
||
+ if cmd.HasParent() {
|
||
+ parent := cmd.Parent()
|
||
+ pname := parent.CommandPath()
|
||
+ link := pname + ".md"
|
||
+ link = strings.ReplaceAll(link, " ", "_")
|
||
+ buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short))
|
||
+ cmd.VisitParents(func(c *cobra.Command) {
|
||
+ if c.DisableAutoGenTag {
|
||
+ cmd.DisableAutoGenTag = c.DisableAutoGenTag
|
||
+ }
|
||
+ })
|
||
+ }
|
||
+
|
||
+ children := cmd.Commands()
|
||
+ sort.Sort(byName(children))
|
||
+
|
||
+ for _, child := range children {
|
||
+ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
|
||
+ continue
|
||
+ }
|
||
+ cname := name + " " + child.Name()
|
||
+ link := cname + ".md"
|
||
+ link = strings.ReplaceAll(link, " ", "_")
|
||
+ buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short))
|
||
+ }
|
||
+ buf.WriteString("\n")
|
||
+ }
|
||
+ if !cmd.DisableAutoGenTag {
|
||
+ buf.WriteString("###### Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "\n")
|
||
+ }
|
||
+ _, err := buf.WriteTo(w)
|
||
+ return err
|
||
+}
|
||
+
|
||
+// GenMarkdownTree will generate a markdown page for this command and all
|
||
+// descendants in the directory given. The header may be nil.
|
||
+// This function may not work correctly if your command names have `-` in them.
|
||
+// If you have `cmd` with two subcmds, `sub` and `sub-third`,
|
||
+// and `sub` has a subcommand called `third`, it is undefined which
|
||
+// help output will be in the file `cmd-sub-third.1`.
|
||
+func GenMarkdownTree(cmd *cobra.Command, dir string) error {
|
||
+ identity := func(s string) string { return s }
|
||
+ emptyStr := func(s string) string { return "" }
|
||
+ return GenMarkdownTreeCustom(cmd, dir, emptyStr, identity)
|
||
+}
|
||
+
|
||
+// GenMarkdownTreeCustom is the the same as GenMarkdownTree, but
|
||
+// with custom filePrepender and linkHandler.
|
||
+func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error {
|
||
+ for _, c := range cmd.Commands() {
|
||
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
|
||
+ continue
|
||
+ }
|
||
+ if err := GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ }
|
||
+
|
||
+ basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".md"
|
||
+ filename := filepath.Join(dir, basename)
|
||
+ f, err := os.Create(filename)
|
||
+ if err != nil {
|
||
+ return err
|
||
+ }
|
||
+ defer f.Close()
|
||
+
|
||
+ if _, err := io.WriteString(f, filePrepender(filename)); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ if err := GenMarkdownCustom(cmd, f, linkHandler); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ return nil
|
||
+}
|
||
diff --git a/vendor/github.com/spf13/cobra/doc/md_docs.md b/vendor/github.com/spf13/cobra/doc/md_docs.md
|
||
new file mode 100644
|
||
index 000000000000..1659175cfdab
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/spf13/cobra/doc/md_docs.md
|
||
@@ -0,0 +1,115 @@
|
||
+# Generating Markdown Docs For Your Own cobra.Command
|
||
+
|
||
+Generating Markdown pages from a cobra command is incredibly easy. An example is as follows:
|
||
+
|
||
+```go
|
||
+package main
|
||
+
|
||
+import (
|
||
+ "log"
|
||
+
|
||
+ "github.com/spf13/cobra"
|
||
+ "github.com/spf13/cobra/doc"
|
||
+)
|
||
+
|
||
+func main() {
|
||
+ cmd := &cobra.Command{
|
||
+ Use: "test",
|
||
+ Short: "my test program",
|
||
+ }
|
||
+ err := doc.GenMarkdownTree(cmd, "/tmp")
|
||
+ if err != nil {
|
||
+ log.Fatal(err)
|
||
+ }
|
||
+}
|
||
+```
|
||
+
|
||
+That will get you a Markdown document `/tmp/test.md`
|
||
+
|
||
+## Generate markdown docs for the entire command tree
|
||
+
|
||
+This program can actually generate docs for the kubectl command in the kubernetes project
|
||
+
|
||
+```go
|
||
+package main
|
||
+
|
||
+import (
|
||
+ "log"
|
||
+ "io/ioutil"
|
||
+ "os"
|
||
+
|
||
+ "k8s.io/kubernetes/pkg/kubectl/cmd"
|
||
+ cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
|
||
+
|
||
+ "github.com/spf13/cobra/doc"
|
||
+)
|
||
+
|
||
+func main() {
|
||
+ kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
|
||
+ err := doc.GenMarkdownTree(kubectl, "./")
|
||
+ if err != nil {
|
||
+ log.Fatal(err)
|
||
+ }
|
||
+}
|
||
+```
|
||
+
|
||
+This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./")
|
||
+
|
||
+## Generate markdown docs for a single command
|
||
+
|
||
+You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenMarkdown` instead of `GenMarkdownTree`
|
||
+
|
||
+```go
|
||
+ out := new(bytes.Buffer)
|
||
+ err := doc.GenMarkdown(cmd, out)
|
||
+ if err != nil {
|
||
+ log.Fatal(err)
|
||
+ }
|
||
+```
|
||
+
|
||
+This will write the markdown doc for ONLY "cmd" into the out, buffer.
|
||
+
|
||
+## Customize the output
|
||
+
|
||
+Both `GenMarkdown` and `GenMarkdownTree` have alternate versions with callbacks to get some control of the output:
|
||
+
|
||
+```go
|
||
+func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error {
|
||
+ //...
|
||
+}
|
||
+```
|
||
+
|
||
+```go
|
||
+func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error {
|
||
+ //...
|
||
+}
|
||
+```
|
||
+
|
||
+The `filePrepender` will prepend the return value given the full filepath to the rendered Markdown file. A common use case is to add front matter to use the generated documentation with [Hugo](https://gohugo.io/):
|
||
+
|
||
+```go
|
||
+const fmTemplate = `---
|
||
+date: %s
|
||
+title: "%s"
|
||
+slug: %s
|
||
+url: %s
|
||
+---
|
||
+`
|
||
+
|
||
+filePrepender := func(filename string) string {
|
||
+ now := time.Now().Format(time.RFC3339)
|
||
+ name := filepath.Base(filename)
|
||
+ base := strings.TrimSuffix(name, path.Ext(name))
|
||
+ url := "/commands/" + strings.ToLower(base) + "/"
|
||
+ return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
|
||
+}
|
||
+```
|
||
+
|
||
+The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename:
|
||
+
|
||
+```go
|
||
+linkHandler := func(name string) string {
|
||
+ base := strings.TrimSuffix(name, path.Ext(name))
|
||
+ return "/commands/" + strings.ToLower(base) + "/"
|
||
+}
|
||
+```
|
||
diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.go b/vendor/github.com/spf13/cobra/doc/rest_docs.go
|
||
new file mode 100644
|
||
index 000000000000..2cca6fd778de
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/spf13/cobra/doc/rest_docs.go
|
||
@@ -0,0 +1,186 @@
|
||
+// Copyright 2013-2023 The Cobra Authors
|
||
+//
|
||
+// Licensed under the Apache License, Version 2.0 (the "License");
|
||
+// you may not use this file except in compliance with the License.
|
||
+// You may obtain a copy of the License at
|
||
+//
|
||
+// http://www.apache.org/licenses/LICENSE-2.0
|
||
+//
|
||
+// Unless required by applicable law or agreed to in writing, software
|
||
+// distributed under the License is distributed on an "AS IS" BASIS,
|
||
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+// See the License for the specific language governing permissions and
|
||
+// limitations under the License.
|
||
+
|
||
+package doc
|
||
+
|
||
+import (
|
||
+ "bytes"
|
||
+ "fmt"
|
||
+ "io"
|
||
+ "os"
|
||
+ "path/filepath"
|
||
+ "sort"
|
||
+ "strings"
|
||
+ "time"
|
||
+
|
||
+ "github.com/spf13/cobra"
|
||
+)
|
||
+
|
||
+func printOptionsReST(buf *bytes.Buffer, cmd *cobra.Command, name string) error {
|
||
+ flags := cmd.NonInheritedFlags()
|
||
+ flags.SetOutput(buf)
|
||
+ if flags.HasAvailableFlags() {
|
||
+ buf.WriteString("Options\n")
|
||
+ buf.WriteString("~~~~~~~\n\n::\n\n")
|
||
+ flags.PrintDefaults()
|
||
+ buf.WriteString("\n")
|
||
+ }
|
||
+
|
||
+ parentFlags := cmd.InheritedFlags()
|
||
+ parentFlags.SetOutput(buf)
|
||
+ if parentFlags.HasAvailableFlags() {
|
||
+ buf.WriteString("Options inherited from parent commands\n")
|
||
+ buf.WriteString("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n")
|
||
+ parentFlags.PrintDefaults()
|
||
+ buf.WriteString("\n")
|
||
+ }
|
||
+ return nil
|
||
+}
|
||
+
|
||
+// defaultLinkHandler for default ReST hyperlink markup
|
||
+func defaultLinkHandler(name, ref string) string {
|
||
+ return fmt.Sprintf("`%s <%s.rst>`_", name, ref)
|
||
+}
|
||
+
|
||
+// GenReST creates reStructured Text output.
|
||
+func GenReST(cmd *cobra.Command, w io.Writer) error {
|
||
+ return GenReSTCustom(cmd, w, defaultLinkHandler)
|
||
+}
|
||
+
|
||
+// GenReSTCustom creates custom reStructured Text output.
|
||
+func GenReSTCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string, string) string) error {
|
||
+ cmd.InitDefaultHelpCmd()
|
||
+ cmd.InitDefaultHelpFlag()
|
||
+
|
||
+ buf := new(bytes.Buffer)
|
||
+ name := cmd.CommandPath()
|
||
+
|
||
+ short := cmd.Short
|
||
+ long := cmd.Long
|
||
+ if len(long) == 0 {
|
||
+ long = short
|
||
+ }
|
||
+ ref := strings.ReplaceAll(name, " ", "_")
|
||
+
|
||
+ buf.WriteString(".. _" + ref + ":\n\n")
|
||
+ buf.WriteString(name + "\n")
|
||
+ buf.WriteString(strings.Repeat("-", len(name)) + "\n\n")
|
||
+ buf.WriteString(short + "\n\n")
|
||
+ buf.WriteString("Synopsis\n")
|
||
+ buf.WriteString("~~~~~~~~\n\n")
|
||
+ buf.WriteString("\n" + long + "\n\n")
|
||
+
|
||
+ if cmd.Runnable() {
|
||
+ buf.WriteString(fmt.Sprintf("::\n\n %s\n\n", cmd.UseLine()))
|
||
+ }
|
||
+
|
||
+ if len(cmd.Example) > 0 {
|
||
+ buf.WriteString("Examples\n")
|
||
+ buf.WriteString("~~~~~~~~\n\n")
|
||
+ buf.WriteString(fmt.Sprintf("::\n\n%s\n\n", indentString(cmd.Example, " ")))
|
||
+ }
|
||
+
|
||
+ if err := printOptionsReST(buf, cmd, name); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ if hasSeeAlso(cmd) {
|
||
+ buf.WriteString("SEE ALSO\n")
|
||
+ buf.WriteString("~~~~~~~~\n\n")
|
||
+ if cmd.HasParent() {
|
||
+ parent := cmd.Parent()
|
||
+ pname := parent.CommandPath()
|
||
+ ref = strings.ReplaceAll(pname, " ", "_")
|
||
+ buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(pname, ref), parent.Short))
|
||
+ cmd.VisitParents(func(c *cobra.Command) {
|
||
+ if c.DisableAutoGenTag {
|
||
+ cmd.DisableAutoGenTag = c.DisableAutoGenTag
|
||
+ }
|
||
+ })
|
||
+ }
|
||
+
|
||
+ children := cmd.Commands()
|
||
+ sort.Sort(byName(children))
|
||
+
|
||
+ for _, child := range children {
|
||
+ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
|
||
+ continue
|
||
+ }
|
||
+ cname := name + " " + child.Name()
|
||
+ ref = strings.ReplaceAll(cname, " ", "_")
|
||
+ buf.WriteString(fmt.Sprintf("* %s \t - %s\n", linkHandler(cname, ref), child.Short))
|
||
+ }
|
||
+ buf.WriteString("\n")
|
||
+ }
|
||
+ if !cmd.DisableAutoGenTag {
|
||
+ buf.WriteString("*Auto generated by spf13/cobra on " + time.Now().Format("2-Jan-2006") + "*\n")
|
||
+ }
|
||
+ _, err := buf.WriteTo(w)
|
||
+ return err
|
||
+}
|
||
+
|
||
+// GenReSTTree will generate a ReST page for this command and all
|
||
+// descendants in the directory given.
|
||
+// This function may not work correctly if your command names have `-` in them.
|
||
+// If you have `cmd` with two subcmds, `sub` and `sub-third`,
|
||
+// and `sub` has a subcommand called `third`, it is undefined which
|
||
+// help output will be in the file `cmd-sub-third.1`.
|
||
+func GenReSTTree(cmd *cobra.Command, dir string) error {
|
||
+ emptyStr := func(s string) string { return "" }
|
||
+ return GenReSTTreeCustom(cmd, dir, emptyStr, defaultLinkHandler)
|
||
+}
|
||
+
|
||
+// GenReSTTreeCustom is the the same as GenReSTTree, but
|
||
+// with custom filePrepender and linkHandler.
|
||
+func GenReSTTreeCustom(cmd *cobra.Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error {
|
||
+ for _, c := range cmd.Commands() {
|
||
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
|
||
+ continue
|
||
+ }
|
||
+ if err := GenReSTTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ }
|
||
+
|
||
+ basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".rst"
|
||
+ filename := filepath.Join(dir, basename)
|
||
+ f, err := os.Create(filename)
|
||
+ if err != nil {
|
||
+ return err
|
||
+ }
|
||
+ defer f.Close()
|
||
+
|
||
+ if _, err := io.WriteString(f, filePrepender(filename)); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ if err := GenReSTCustom(cmd, f, linkHandler); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ return nil
|
||
+}
|
||
+
|
||
+// indentString adapted from: https://github.com/kr/text/blob/main/indent.go
|
||
+func indentString(s, p string) string {
|
||
+ var res []byte
|
||
+ b := []byte(s)
|
||
+ prefix := []byte(p)
|
||
+ bol := true
|
||
+ for _, c := range b {
|
||
+ if bol && c != '\n' {
|
||
+ res = append(res, prefix...)
|
||
+ }
|
||
+ res = append(res, c)
|
||
+ bol = c == '\n'
|
||
+ }
|
||
+ return string(res)
|
||
+}
|
||
diff --git a/vendor/github.com/spf13/cobra/doc/rest_docs.md b/vendor/github.com/spf13/cobra/doc/rest_docs.md
|
||
new file mode 100644
|
||
index 000000000000..3041c573ab01
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/spf13/cobra/doc/rest_docs.md
|
||
@@ -0,0 +1,114 @@
|
||
+# Generating ReStructured Text Docs For Your Own cobra.Command
|
||
+
|
||
+Generating ReST pages from a cobra command is incredibly easy. An example is as follows:
|
||
+
|
||
+```go
|
||
+package main
|
||
+
|
||
+import (
|
||
+ "log"
|
||
+
|
||
+ "github.com/spf13/cobra"
|
||
+ "github.com/spf13/cobra/doc"
|
||
+)
|
||
+
|
||
+func main() {
|
||
+ cmd := &cobra.Command{
|
||
+ Use: "test",
|
||
+ Short: "my test program",
|
||
+ }
|
||
+ err := doc.GenReSTTree(cmd, "/tmp")
|
||
+ if err != nil {
|
||
+ log.Fatal(err)
|
||
+ }
|
||
+}
|
||
+```
|
||
+
|
||
+That will get you a ReST document `/tmp/test.rst`
|
||
+
|
||
+## Generate ReST docs for the entire command tree
|
||
+
|
||
+This program can actually generate docs for the kubectl command in the kubernetes project
|
||
+
|
||
+```go
|
||
+package main
|
||
+
|
||
+import (
|
||
+ "log"
|
||
+ "io/ioutil"
|
||
+ "os"
|
||
+
|
||
+ "k8s.io/kubernetes/pkg/kubectl/cmd"
|
||
+ cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
|
||
+
|
||
+ "github.com/spf13/cobra/doc"
|
||
+)
|
||
+
|
||
+func main() {
|
||
+ kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
|
||
+ err := doc.GenReSTTree(kubectl, "./")
|
||
+ if err != nil {
|
||
+ log.Fatal(err)
|
||
+ }
|
||
+}
|
||
+```
|
||
+
|
||
+This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./")
|
||
+
|
||
+## Generate ReST docs for a single command
|
||
+
|
||
+You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenReST` instead of `GenReSTTree`
|
||
+
|
||
+```go
|
||
+ out := new(bytes.Buffer)
|
||
+ err := doc.GenReST(cmd, out)
|
||
+ if err != nil {
|
||
+ log.Fatal(err)
|
||
+ }
|
||
+```
|
||
+
|
||
+This will write the ReST doc for ONLY "cmd" into the out, buffer.
|
||
+
|
||
+## Customize the output
|
||
+
|
||
+Both `GenReST` and `GenReSTTree` have alternate versions with callbacks to get some control of the output:
|
||
+
|
||
+```go
|
||
+func GenReSTTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string, string) string) error {
|
||
+ //...
|
||
+}
|
||
+```
|
||
+
|
||
+```go
|
||
+func GenReSTCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string, string) string) error {
|
||
+ //...
|
||
+}
|
||
+```
|
||
+
|
||
+The `filePrepender` will prepend the return value given the full filepath to the rendered ReST file. A common use case is to add front matter to use the generated documentation with [Hugo](https://gohugo.io/):
|
||
+
|
||
+```go
|
||
+const fmTemplate = `---
|
||
+date: %s
|
||
+title: "%s"
|
||
+slug: %s
|
||
+url: %s
|
||
+---
|
||
+`
|
||
+filePrepender := func(filename string) string {
|
||
+ now := time.Now().Format(time.RFC3339)
|
||
+ name := filepath.Base(filename)
|
||
+ base := strings.TrimSuffix(name, path.Ext(name))
|
||
+ url := "/commands/" + strings.ToLower(base) + "/"
|
||
+ return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
|
||
+}
|
||
+```
|
||
+
|
||
+The `linkHandler` can be used to customize the rendered links to the commands, given a command name and reference. This is useful while converting rst to html or while generating documentation with tools like Sphinx where `:ref:` is used:
|
||
+
|
||
+```go
|
||
+// Sphinx cross-referencing format
|
||
+linkHandler := func(name, ref string) string {
|
||
+ return fmt.Sprintf(":ref:`%s <%s>`", name, ref)
|
||
+}
|
||
+```
|
||
diff --git a/vendor/github.com/spf13/cobra/doc/util.go b/vendor/github.com/spf13/cobra/doc/util.go
|
||
new file mode 100644
|
||
index 000000000000..0aaa07a166d8
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/spf13/cobra/doc/util.go
|
||
@@ -0,0 +1,52 @@
|
||
+// Copyright 2013-2023 The Cobra Authors
|
||
+//
|
||
+// Licensed under the Apache License, Version 2.0 (the "License");
|
||
+// you may not use this file except in compliance with the License.
|
||
+// You may obtain a copy of the License at
|
||
+//
|
||
+// http://www.apache.org/licenses/LICENSE-2.0
|
||
+//
|
||
+// Unless required by applicable law or agreed to in writing, software
|
||
+// distributed under the License is distributed on an "AS IS" BASIS,
|
||
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+// See the License for the specific language governing permissions and
|
||
+// limitations under the License.
|
||
+
|
||
+package doc
|
||
+
|
||
+import (
|
||
+ "strings"
|
||
+
|
||
+ "github.com/spf13/cobra"
|
||
+)
|
||
+
|
||
+// Test to see if we have a reason to print See Also information in docs
|
||
+// Basically this is a test for a parent command or a subcommand which is
|
||
+// both not deprecated and not the autogenerated help command.
|
||
+func hasSeeAlso(cmd *cobra.Command) bool {
|
||
+ if cmd.HasParent() {
|
||
+ return true
|
||
+ }
|
||
+ for _, c := range cmd.Commands() {
|
||
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
|
||
+ continue
|
||
+ }
|
||
+ return true
|
||
+ }
|
||
+ return false
|
||
+}
|
||
+
|
||
+// Temporary workaround for yaml lib generating incorrect yaml with long strings
|
||
+// that do not contain \n.
|
||
+func forceMultiLine(s string) string {
|
||
+ if len(s) > 60 && !strings.Contains(s, "\n") {
|
||
+ s = s + "\n"
|
||
+ }
|
||
+ return s
|
||
+}
|
||
+
|
||
+type byName []*cobra.Command
|
||
+
|
||
+func (s byName) Len() int { return len(s) }
|
||
+func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||
+func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
|
||
diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.go b/vendor/github.com/spf13/cobra/doc/yaml_docs.go
|
||
new file mode 100644
|
||
index 000000000000..2b26d6ec0f3e
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.go
|
||
@@ -0,0 +1,175 @@
|
||
+// Copyright 2013-2023 The Cobra Authors
|
||
+//
|
||
+// Licensed under the Apache License, Version 2.0 (the "License");
|
||
+// you may not use this file except in compliance with the License.
|
||
+// You may obtain a copy of the License at
|
||
+//
|
||
+// http://www.apache.org/licenses/LICENSE-2.0
|
||
+//
|
||
+// Unless required by applicable law or agreed to in writing, software
|
||
+// distributed under the License is distributed on an "AS IS" BASIS,
|
||
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+// See the License for the specific language governing permissions and
|
||
+// limitations under the License.
|
||
+
|
||
+package doc
|
||
+
|
||
+import (
|
||
+ "fmt"
|
||
+ "io"
|
||
+ "os"
|
||
+ "path/filepath"
|
||
+ "sort"
|
||
+ "strings"
|
||
+
|
||
+ "github.com/spf13/cobra"
|
||
+ "github.com/spf13/pflag"
|
||
+ "gopkg.in/yaml.v3"
|
||
+)
|
||
+
|
||
+type cmdOption struct {
|
||
+ Name string
|
||
+ Shorthand string `yaml:",omitempty"`
|
||
+ DefaultValue string `yaml:"default_value,omitempty"`
|
||
+ Usage string `yaml:",omitempty"`
|
||
+}
|
||
+
|
||
+type cmdDoc struct {
|
||
+ Name string
|
||
+ Synopsis string `yaml:",omitempty"`
|
||
+ Description string `yaml:",omitempty"`
|
||
+ Usage string `yaml:",omitempty"`
|
||
+ Options []cmdOption `yaml:",omitempty"`
|
||
+ InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"`
|
||
+ Example string `yaml:",omitempty"`
|
||
+ SeeAlso []string `yaml:"see_also,omitempty"`
|
||
+}
|
||
+
|
||
+// GenYamlTree creates yaml structured ref files for this command and all descendants
|
||
+// in the directory given. This function may not work
|
||
+// correctly if your command names have `-` in them. If you have `cmd` with two
|
||
+// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third`
|
||
+// it is undefined which help output will be in the file `cmd-sub-third.1`.
|
||
+func GenYamlTree(cmd *cobra.Command, dir string) error {
|
||
+ identity := func(s string) string { return s }
|
||
+ emptyStr := func(s string) string { return "" }
|
||
+ return GenYamlTreeCustom(cmd, dir, emptyStr, identity)
|
||
+}
|
||
+
|
||
+// GenYamlTreeCustom creates yaml structured ref files.
|
||
+func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error {
|
||
+ for _, c := range cmd.Commands() {
|
||
+ if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
|
||
+ continue
|
||
+ }
|
||
+ if err := GenYamlTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ }
|
||
+
|
||
+ basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".yaml"
|
||
+ filename := filepath.Join(dir, basename)
|
||
+ f, err := os.Create(filename)
|
||
+ if err != nil {
|
||
+ return err
|
||
+ }
|
||
+ defer f.Close()
|
||
+
|
||
+ if _, err := io.WriteString(f, filePrepender(filename)); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ if err := GenYamlCustom(cmd, f, linkHandler); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ return nil
|
||
+}
|
||
+
|
||
+// GenYaml creates yaml output.
|
||
+func GenYaml(cmd *cobra.Command, w io.Writer) error {
|
||
+ return GenYamlCustom(cmd, w, func(s string) string { return s })
|
||
+}
|
||
+
|
||
+// GenYamlCustom creates custom yaml output.
|
||
+func GenYamlCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) string) error {
|
||
+ cmd.InitDefaultHelpCmd()
|
||
+ cmd.InitDefaultHelpFlag()
|
||
+
|
||
+ yamlDoc := cmdDoc{}
|
||
+ yamlDoc.Name = cmd.CommandPath()
|
||
+
|
||
+ yamlDoc.Synopsis = forceMultiLine(cmd.Short)
|
||
+ yamlDoc.Description = forceMultiLine(cmd.Long)
|
||
+
|
||
+ if cmd.Runnable() {
|
||
+ yamlDoc.Usage = cmd.UseLine()
|
||
+ }
|
||
+
|
||
+ if len(cmd.Example) > 0 {
|
||
+ yamlDoc.Example = cmd.Example
|
||
+ }
|
||
+
|
||
+ flags := cmd.NonInheritedFlags()
|
||
+ if flags.HasFlags() {
|
||
+ yamlDoc.Options = genFlagResult(flags)
|
||
+ }
|
||
+ flags = cmd.InheritedFlags()
|
||
+ if flags.HasFlags() {
|
||
+ yamlDoc.InheritedOptions = genFlagResult(flags)
|
||
+ }
|
||
+
|
||
+ if hasSeeAlso(cmd) {
|
||
+ result := []string{}
|
||
+ if cmd.HasParent() {
|
||
+ parent := cmd.Parent()
|
||
+ result = append(result, parent.CommandPath()+" - "+parent.Short)
|
||
+ }
|
||
+ children := cmd.Commands()
|
||
+ sort.Sort(byName(children))
|
||
+ for _, child := range children {
|
||
+ if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
|
||
+ continue
|
||
+ }
|
||
+ result = append(result, child.CommandPath()+" - "+child.Short)
|
||
+ }
|
||
+ yamlDoc.SeeAlso = result
|
||
+ }
|
||
+
|
||
+ final, err := yaml.Marshal(&yamlDoc)
|
||
+ if err != nil {
|
||
+ fmt.Println(err)
|
||
+ os.Exit(1)
|
||
+ }
|
||
+
|
||
+ if _, err := w.Write(final); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ return nil
|
||
+}
|
||
+
|
||
+func genFlagResult(flags *pflag.FlagSet) []cmdOption {
|
||
+ var result []cmdOption
|
||
+
|
||
+ flags.VisitAll(func(flag *pflag.Flag) {
|
||
+ // Todo, when we mark a shorthand is deprecated, but specify an empty message.
|
||
+ // The flag.ShorthandDeprecated is empty as the shorthand is deprecated.
|
||
+ // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok.
|
||
+ if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 {
|
||
+ opt := cmdOption{
|
||
+ flag.Name,
|
||
+ flag.Shorthand,
|
||
+ flag.DefValue,
|
||
+ forceMultiLine(flag.Usage),
|
||
+ }
|
||
+ result = append(result, opt)
|
||
+ } else {
|
||
+ opt := cmdOption{
|
||
+ Name: flag.Name,
|
||
+ DefaultValue: forceMultiLine(flag.DefValue),
|
||
+ Usage: forceMultiLine(flag.Usage),
|
||
+ }
|
||
+ result = append(result, opt)
|
||
+ }
|
||
+ })
|
||
+
|
||
+ return result
|
||
+}
|
||
diff --git a/vendor/github.com/spf13/cobra/doc/yaml_docs.md b/vendor/github.com/spf13/cobra/doc/yaml_docs.md
|
||
new file mode 100644
|
||
index 000000000000..172e61d12145
|
||
--- /dev/null
|
||
+++ b/vendor/github.com/spf13/cobra/doc/yaml_docs.md
|
||
@@ -0,0 +1,112 @@
|
||
+# Generating Yaml Docs For Your Own cobra.Command
|
||
+
|
||
+Generating yaml files from a cobra command is incredibly easy. An example is as follows:
|
||
+
|
||
+```go
|
||
+package main
|
||
+
|
||
+import (
|
||
+ "log"
|
||
+
|
||
+ "github.com/spf13/cobra"
|
||
+ "github.com/spf13/cobra/doc"
|
||
+)
|
||
+
|
||
+func main() {
|
||
+ cmd := &cobra.Command{
|
||
+ Use: "test",
|
||
+ Short: "my test program",
|
||
+ }
|
||
+ err := doc.GenYamlTree(cmd, "/tmp")
|
||
+ if err != nil {
|
||
+ log.Fatal(err)
|
||
+ }
|
||
+}
|
||
+```
|
||
+
|
||
+That will get you a Yaml document `/tmp/test.yaml`
|
||
+
|
||
+## Generate yaml docs for the entire command tree
|
||
+
|
||
+This program can actually generate docs for the kubectl command in the kubernetes project
|
||
+
|
||
+```go
|
||
+package main
|
||
+
|
||
+import (
|
||
+ "io/ioutil"
|
||
+ "log"
|
||
+ "os"
|
||
+
|
||
+ "k8s.io/kubernetes/pkg/kubectl/cmd"
|
||
+ cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
|
||
+
|
||
+ "github.com/spf13/cobra/doc"
|
||
+)
|
||
+
|
||
+func main() {
|
||
+ kubectl := cmd.NewKubectlCommand(cmdutil.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
|
||
+ err := doc.GenYamlTree(kubectl, "./")
|
||
+ if err != nil {
|
||
+ log.Fatal(err)
|
||
+ }
|
||
+}
|
||
+```
|
||
+
|
||
+This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./")
|
||
+
|
||
+## Generate yaml docs for a single command
|
||
+
|
||
+You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenYaml` instead of `GenYamlTree`
|
||
+
|
||
+```go
|
||
+ out := new(bytes.Buffer)
|
||
+ doc.GenYaml(cmd, out)
|
||
+```
|
||
+
|
||
+This will write the yaml doc for ONLY "cmd" into the out, buffer.
|
||
+
|
||
+## Customize the output
|
||
+
|
||
+Both `GenYaml` and `GenYamlTree` have alternate versions with callbacks to get some control of the output:
|
||
+
|
||
+```go
|
||
+func GenYamlTreeCustom(cmd *Command, dir string, filePrepender, linkHandler func(string) string) error {
|
||
+ //...
|
||
+}
|
||
+```
|
||
+
|
||
+```go
|
||
+func GenYamlCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) error {
|
||
+ //...
|
||
+}
|
||
+```
|
||
+
|
||
+The `filePrepender` will prepend the return value given the full filepath to the rendered Yaml file. A common use case is to add front matter to use the generated documentation with [Hugo](https://gohugo.io/):
|
||
+
|
||
+```go
|
||
+const fmTemplate = `---
|
||
+date: %s
|
||
+title: "%s"
|
||
+slug: %s
|
||
+url: %s
|
||
+---
|
||
+`
|
||
+
|
||
+filePrepender := func(filename string) string {
|
||
+ now := time.Now().Format(time.RFC3339)
|
||
+ name := filepath.Base(filename)
|
||
+ base := strings.TrimSuffix(name, path.Ext(name))
|
||
+ url := "/commands/" + strings.ToLower(base) + "/"
|
||
+ return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url)
|
||
+}
|
||
+```
|
||
+
|
||
+The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename:
|
||
+
|
||
+```go
|
||
+linkHandler := func(name string) string {
|
||
+ base := strings.TrimSuffix(name, path.Ext(name))
|
||
+ return "/commands/" + strings.ToLower(base) + "/"
|
||
+}
|
||
+```
|
||
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
|
||
index f34a38e4e95f..a6b5081888ba 100644
|
||
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
|
||
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
|
||
@@ -1,4 +1,4 @@
|
||
-// Copyright 2020 Google LLC
|
||
+// Copyright 2022 Google LLC
|
||
//
|
||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||
// you may not use this file except in compliance with the License.
|
||
@@ -15,7 +15,7 @@
|
||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||
// versions:
|
||
// protoc-gen-go v1.26.0
|
||
-// protoc v3.12.2
|
||
+// protoc v3.21.9
|
||
// source: google/rpc/status.proto
|
||
|
||
package status
|
||
@@ -48,11 +48,13 @@ type Status struct {
|
||
sizeCache protoimpl.SizeCache
|
||
unknownFields protoimpl.UnknownFields
|
||
|
||
- // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
|
||
+ // The status code, which should be an enum value of
|
||
+ // [google.rpc.Code][google.rpc.Code].
|
||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||
// A developer-facing error message, which should be in English. Any
|
||
// user-facing error message should be localized and sent in the
|
||
- // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
|
||
+ // [google.rpc.Status.details][google.rpc.Status.details] field, or localized
|
||
+ // by the client.
|
||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||
// A list of messages that carry the error details. There is a common set of
|
||
// message types for APIs to use.
|
||
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
|
||
index ae13ddac14e0..02f5dc531891 100644
|
||
--- a/vendor/google.golang.org/grpc/attributes/attributes.go
|
||
+++ b/vendor/google.golang.org/grpc/attributes/attributes.go
|
||
@@ -19,7 +19,7 @@
|
||
// Package attributes defines a generic key/value store used in various gRPC
|
||
// components.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
|
||
index 542594f5cc51..29475e31c979 100644
|
||
--- a/vendor/google.golang.org/grpc/backoff.go
|
||
+++ b/vendor/google.golang.org/grpc/backoff.go
|
||
@@ -48,7 +48,7 @@ type BackoffConfig struct {
|
||
// here for more details:
|
||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
|
||
index f4f9408f3852..09d61dd1b55b 100644
|
||
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
|
||
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
|
||
@@ -110,6 +110,11 @@ type SubConn interface {
|
||
UpdateAddresses([]resolver.Address)
|
||
// Connect starts the connecting for this SubConn.
|
||
Connect()
|
||
+ // GetOrBuildProducer returns a reference to the existing Producer for this
|
||
+ // ProducerBuilder in this SubConn, or, if one does not currently exist,
|
||
+ // creates a new one and returns it. Returns a close function which must
|
||
+ // be called when the Producer is no longer needed.
|
||
+ GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
|
||
}
|
||
|
||
// NewSubConnOptions contains options to create new SubConn.
|
||
@@ -274,6 +279,14 @@ type PickResult struct {
|
||
// type, Done may not be called. May be nil if the balancer does not wish
|
||
// to be notified when the RPC completes.
|
||
Done func(DoneInfo)
|
||
+
|
||
+ // Metadata provides a way for LB policies to inject arbitrary per-call
|
||
+ // metadata. Any metadata returned here will be merged with existing
|
||
+ // metadata added by the client application.
|
||
+ //
|
||
+ // LB policies with child policies are responsible for propagating metadata
|
||
+ // injected by their children to the ClientConn, as part of Pick().
|
||
+ Metatada metadata.MD
|
||
}
|
||
|
||
// TransientFailureError returns e. It exists for backward compatibility and
|
||
@@ -371,3 +384,21 @@ type ClientConnState struct {
|
||
// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
|
||
// problem with the provided name resolver data.
|
||
var ErrBadResolverState = errors.New("bad resolver state")
|
||
+
|
||
+// A ProducerBuilder is a simple constructor for a Producer. It is used by the
|
||
+// SubConn to create producers when needed.
|
||
+type ProducerBuilder interface {
|
||
+ // Build creates a Producer. The first parameter is always a
|
||
+ // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
|
||
+ // associated SubConn), but is declared as interface{} to avoid a
|
||
+ // dependency cycle. Should also return a close function that will be
|
||
+ // called when all references to the Producer have been given up.
|
||
+ Build(grpcClientConnInterface interface{}) (p Producer, close func())
|
||
+}
|
||
+
|
||
+// A Producer is a type shared among potentially many consumers. It is
|
||
+// associated with a SubConn, and an implementation will typically contain
|
||
+// other methods to provide additional functionality, e.g. configuration or
|
||
+// subscription registration.
|
||
+type Producer interface {
|
||
+}
|
||
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
|
||
index e8dfc828aaac..3929c26d31e1 100644
|
||
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
|
||
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
|
||
@@ -157,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error {
|
||
|
||
// regeneratePicker takes a snapshot of the balancer, and generates a picker
|
||
// from it. The picker is
|
||
-// - errPicker if the balancer is in TransientFailure,
|
||
-// - built by the pickerBuilder with all READY SubConns otherwise.
|
||
+// - errPicker if the balancer is in TransientFailure,
|
||
+// - built by the pickerBuilder with all READY SubConns otherwise.
|
||
func (b *baseBalancer) regeneratePicker() {
|
||
if b.state == connectivity.TransientFailure {
|
||
b.picker = NewErrPicker(b.mergeErrors())
|
||
diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
|
||
index a87b6809af38..c33413581091 100644
|
||
--- a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
|
||
+++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
|
||
@@ -34,10 +34,10 @@ type ConnectivityStateEvaluator struct {
|
||
// RecordTransition records state change happening in subConn and based on that
|
||
// it evaluates what aggregated state should be.
|
||
//
|
||
-// - If at least one SubConn in Ready, the aggregated state is Ready;
|
||
-// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
|
||
-// - Else if at least one SubConn is Idle, the aggregated state is Idle;
|
||
-// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure.
|
||
+// - If at least one SubConn in Ready, the aggregated state is Ready;
|
||
+// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
|
||
+// - Else if at least one SubConn is Idle, the aggregated state is Idle;
|
||
+// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure.
|
||
//
|
||
// Shutdown is not considered.
|
||
func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
|
||
@@ -55,7 +55,11 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne
|
||
cse.numIdle += updateVal
|
||
}
|
||
}
|
||
+ return cse.CurrentState()
|
||
+}
|
||
|
||
+// CurrentState returns the current aggregate conn state by evaluating the counters
|
||
+func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State {
|
||
// Evaluate.
|
||
if cse.numReady > 0 {
|
||
return connectivity.Ready
|
||
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
|
||
index b1c23eaae0db..0359956d36fa 100644
|
||
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
|
||
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
|
||
@@ -19,17 +19,20 @@
|
||
package grpc
|
||
|
||
import (
|
||
+ "context"
|
||
"fmt"
|
||
"strings"
|
||
"sync"
|
||
|
||
"google.golang.org/grpc/balancer"
|
||
+ "google.golang.org/grpc/codes"
|
||
"google.golang.org/grpc/connectivity"
|
||
"google.golang.org/grpc/internal/balancer/gracefulswitch"
|
||
"google.golang.org/grpc/internal/buffer"
|
||
"google.golang.org/grpc/internal/channelz"
|
||
"google.golang.org/grpc/internal/grpcsync"
|
||
"google.golang.org/grpc/resolver"
|
||
+ "google.golang.org/grpc/status"
|
||
)
|
||
|
||
// ccBalancerWrapper sits between the ClientConn and the Balancer.
|
||
@@ -305,7 +308,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
|
||
channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
|
||
return nil, err
|
||
}
|
||
- acbw := &acBalancerWrapper{ac: ac}
|
||
+ acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
|
||
acbw.ac.mu.Lock()
|
||
ac.acbw = acbw
|
||
acbw.ac.mu.Unlock()
|
||
@@ -359,8 +362,9 @@ func (ccb *ccBalancerWrapper) Target() string {
|
||
// acBalancerWrapper is a wrapper on top of ac for balancers.
|
||
// It implements balancer.SubConn interface.
|
||
type acBalancerWrapper struct {
|
||
- mu sync.Mutex
|
||
- ac *addrConn
|
||
+ mu sync.Mutex
|
||
+ ac *addrConn
|
||
+ producers map[balancer.ProducerBuilder]*refCountedProducer
|
||
}
|
||
|
||
func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
|
||
@@ -414,3 +418,64 @@ func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
|
||
defer acbw.mu.Unlock()
|
||
return acbw.ac
|
||
}
|
||
+
|
||
+var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected")
|
||
+
|
||
+// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
|
||
+// ready, returns errSubConnNotReady.
|
||
+func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
|
||
+ transport := acbw.ac.getReadyTransport()
|
||
+ if transport == nil {
|
||
+ return nil, errSubConnNotReady
|
||
+ }
|
||
+ return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
|
||
+}
|
||
+
|
||
+// Invoke performs a unary RPC. If the addrConn is not ready, returns
|
||
+// errSubConnNotReady.
|
||
+func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error {
|
||
+ cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
|
||
+ if err != nil {
|
||
+ return err
|
||
+ }
|
||
+ if err := cs.SendMsg(args); err != nil {
|
||
+ return err
|
||
+ }
|
||
+ return cs.RecvMsg(reply)
|
||
+}
|
||
+
|
||
+type refCountedProducer struct {
|
||
+ producer balancer.Producer
|
||
+ refs int // number of current refs to the producer
|
||
+ close func() // underlying producer's close function
|
||
+}
|
||
+
|
||
+func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
|
||
+ acbw.mu.Lock()
|
||
+ defer acbw.mu.Unlock()
|
||
+
|
||
+ // Look up existing producer from this builder.
|
||
+ pData := acbw.producers[pb]
|
||
+ if pData == nil {
|
||
+ // Not found; create a new one and add it to the producers map.
|
||
+ p, close := pb.Build(acbw)
|
||
+ pData = &refCountedProducer{producer: p, close: close}
|
||
+ acbw.producers[pb] = pData
|
||
+ }
|
||
+ // Account for this new reference.
|
||
+ pData.refs++
|
||
+
|
||
+ // Return a cleanup function wrapped in a OnceFunc to remove this reference
|
||
+ // and delete the refCountedProducer from the map if the total reference
|
||
+ // count goes to zero.
|
||
+ unref := func() {
|
||
+ acbw.mu.Lock()
|
||
+ pData.refs--
|
||
+ if pData.refs == 0 {
|
||
+ defer pData.close() // Run outside the acbw mutex
|
||
+ delete(acbw.producers, pb)
|
||
+ }
|
||
+ acbw.mu.Unlock()
|
||
+ }
|
||
+ return pData.producer, grpcsync.OnceFunc(unref)
|
||
+}
|
||
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
|
||
index ed75290cdf34..66d141fce707 100644
|
||
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
|
||
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
|
||
@@ -18,14 +18,13 @@
|
||
|
||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||
// versions:
|
||
-// protoc-gen-go v1.25.0
|
||
+// protoc-gen-go v1.28.1
|
||
// protoc v3.14.0
|
||
// source: grpc/binlog/v1/binarylog.proto
|
||
|
||
package grpc_binarylog_v1
|
||
|
||
import (
|
||
- proto "github.com/golang/protobuf/proto"
|
||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||
durationpb "google.golang.org/protobuf/types/known/durationpb"
|
||
@@ -41,10 +40,6 @@ const (
|
||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||
)
|
||
|
||
-// This is a compile-time assertion that a sufficiently up-to-date version
|
||
-// of the legacy proto package is being used.
|
||
-const _ = proto.ProtoPackageIsVersion4
|
||
-
|
||
// Enumerates the type of event
|
||
// Note the terminology is different from the RPC semantics
|
||
// definition, but the same meaning is expressed here.
|
||
@@ -261,6 +256,7 @@ type GrpcLogEntry struct {
|
||
// according to the type of the log entry.
|
||
//
|
||
// Types that are assignable to Payload:
|
||
+ //
|
||
// *GrpcLogEntry_ClientHeader
|
||
// *GrpcLogEntry_ServerHeader
|
||
// *GrpcLogEntry_Message
|
||
@@ -694,12 +690,12 @@ func (x *Message) GetData() []byte {
|
||
// Header keys added by gRPC are omitted. To be more specific,
|
||
// implementations will not log the following entries, and this is
|
||
// not to be treated as a truncation:
|
||
-// - entries handled by grpc that are not user visible, such as those
|
||
-// that begin with 'grpc-' (with exception of grpc-trace-bin)
|
||
-// or keys like 'lb-token'
|
||
-// - transport specific entries, including but not limited to:
|
||
-// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
|
||
-// - entries added for call credentials
|
||
+// - entries handled by grpc that are not user visible, such as those
|
||
+// that begin with 'grpc-' (with exception of grpc-trace-bin)
|
||
+// or keys like 'lb-token'
|
||
+// - transport specific entries, including but not limited to:
|
||
+// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
|
||
+// - entries added for call credentials
|
||
//
|
||
// Implementations must always log grpc-trace-bin if it is present.
|
||
// Practically speaking it will only be visible on server side because
|
||
diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go
|
||
index a220c47c59a5..32b7fa5794e1 100644
|
||
--- a/vendor/google.golang.org/grpc/channelz/channelz.go
|
||
+++ b/vendor/google.golang.org/grpc/channelz/channelz.go
|
||
@@ -23,7 +23,7 @@
|
||
// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
|
||
// the `internal/channelz` package.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: All APIs in this package are experimental and may be removed in a
|
||
// later release.
|
||
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
|
||
index 779b03bca1c3..d607d4e9e243 100644
|
||
--- a/vendor/google.golang.org/grpc/clientconn.go
|
||
+++ b/vendor/google.golang.org/grpc/clientconn.go
|
||
@@ -256,7 +256,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
||
if err != nil {
|
||
return nil, err
|
||
}
|
||
- cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts)
|
||
+ cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts)
|
||
if err != nil {
|
||
return nil, err
|
||
}
|
||
@@ -503,7 +503,7 @@ type ClientConn struct {
|
||
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
|
||
// ctx expires. A true value is returned in former case and false in latter.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -522,7 +522,7 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
|
||
|
||
// GetState returns the connectivity.State of ClientConn.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
|
||
// release.
|
||
@@ -534,7 +534,7 @@ func (cc *ClientConn) GetState() connectivity.State {
|
||
// the channel is idle. Does not wait for the connection attempts to begin
|
||
// before returning.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
|
||
// release.
|
||
@@ -761,7 +761,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
|
||
|
||
// Target returns the target string of the ClientConn.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -788,10 +788,16 @@ func (cc *ClientConn) incrCallsFailed() {
|
||
func (ac *addrConn) connect() error {
|
||
ac.mu.Lock()
|
||
if ac.state == connectivity.Shutdown {
|
||
+ if logger.V(2) {
|
||
+ logger.Infof("connect called on shutdown addrConn; ignoring.")
|
||
+ }
|
||
ac.mu.Unlock()
|
||
return errConnClosing
|
||
}
|
||
if ac.state != connectivity.Idle {
|
||
+ if logger.V(2) {
|
||
+ logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state)
|
||
+ }
|
||
ac.mu.Unlock()
|
||
return nil
|
||
}
|
||
@@ -831,9 +837,9 @@ func equalAddresses(a, b []resolver.Address) bool {
|
||
//
|
||
// If ac is Ready, it checks whether current connected address of ac is in the
|
||
// new addrs list.
|
||
-// - If true, it updates ac.addrs and returns true. The ac will keep using
|
||
-// the existing connection.
|
||
-// - If false, it does nothing and returns false.
|
||
+// - If true, it updates ac.addrs and returns true. The ac will keep using
|
||
+// the existing connection.
|
||
+// - If false, it does nothing and returns false.
|
||
func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
|
||
ac.mu.Lock()
|
||
defer ac.mu.Unlock()
|
||
@@ -928,7 +934,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
|
||
return cc.sc.healthCheckConfig
|
||
}
|
||
|
||
-func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||
+func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
|
||
return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
|
||
Ctx: ctx,
|
||
FullMethodName: method,
|
||
@@ -998,7 +1004,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
|
||
// However, if a previously unavailable network becomes available, this may be
|
||
// used to trigger an immediate reconnect.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -1228,111 +1234,79 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
|
||
// address was not successfully connected, or updates ac appropriately with the
|
||
// new transport.
|
||
func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
|
||
- // TODO: Delete prefaceReceived and move the logic to wait for it into the
|
||
- // transport.
|
||
- prefaceReceived := grpcsync.NewEvent()
|
||
- connClosed := grpcsync.NewEvent()
|
||
-
|
||
addr.ServerName = ac.cc.getServerName(addr)
|
||
hctx, hcancel := context.WithCancel(ac.ctx)
|
||
- hcStarted := false // protected by ac.mu
|
||
|
||
- onClose := func() {
|
||
+ onClose := func(r transport.GoAwayReason) {
|
||
ac.mu.Lock()
|
||
defer ac.mu.Unlock()
|
||
- defer connClosed.Fire()
|
||
- defer hcancel()
|
||
- if !hcStarted || hctx.Err() != nil {
|
||
- // We didn't start the health check or set the state to READY, so
|
||
- // no need to do anything else here.
|
||
- //
|
||
- // OR, we have already cancelled the health check context, meaning
|
||
- // we have already called onClose once for this transport. In this
|
||
- // case it would be dangerous to clear the transport and update the
|
||
- // state, since there may be a new transport in this addrConn.
|
||
+ // adjust params based on GoAwayReason
|
||
+ ac.adjustParams(r)
|
||
+ if ac.state == connectivity.Shutdown {
|
||
+ // Already shut down. tearDown() already cleared the transport and
|
||
+ // canceled hctx via ac.ctx, and we expected this connection to be
|
||
+ // closed, so do nothing here.
|
||
+ return
|
||
+ }
|
||
+ hcancel()
|
||
+ if ac.transport == nil {
|
||
+ // We're still connecting to this address, which could error. Do
|
||
+ // not update the connectivity state or resolve; these will happen
|
||
+ // at the end of the tryAllAddrs connection loop in the event of an
|
||
+ // error.
|
||
return
|
||
}
|
||
ac.transport = nil
|
||
- // Refresh the name resolver
|
||
+ // Refresh the name resolver on any connection loss.
|
||
ac.cc.resolveNow(resolver.ResolveNowOptions{})
|
||
- if ac.state != connectivity.Shutdown {
|
||
- ac.updateConnectivityState(connectivity.Idle, nil)
|
||
- }
|
||
- }
|
||
-
|
||
- onGoAway := func(r transport.GoAwayReason) {
|
||
- ac.mu.Lock()
|
||
- ac.adjustParams(r)
|
||
- ac.mu.Unlock()
|
||
- onClose()
|
||
+ // Always go idle and wait for the LB policy to initiate a new
|
||
+ // connection attempt.
|
||
+ ac.updateConnectivityState(connectivity.Idle, nil)
|
||
}
|
||
|
||
connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
|
||
defer cancel()
|
||
copts.ChannelzParentID = ac.channelzID
|
||
|
||
- newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose)
|
||
+ newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose)
|
||
if err != nil {
|
||
+ if logger.V(2) {
|
||
+ logger.Infof("Creating new client transport to %q: %v", addr, err)
|
||
+ }
|
||
// newTr is either nil, or closed.
|
||
hcancel()
|
||
channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
|
||
return err
|
||
}
|
||
|
||
- select {
|
||
- case <-connectCtx.Done():
|
||
- // We didn't get the preface in time.
|
||
+ ac.mu.Lock()
|
||
+ defer ac.mu.Unlock()
|
||
+ if ac.state == connectivity.Shutdown {
|
||
+ // This can happen if the subConn was removed while in `Connecting`
|
||
+ // state. tearDown() would have set the state to `Shutdown`, but
|
||
+ // would not have closed the transport since ac.transport would not
|
||
+ // have been set at that point.
|
||
+ //
|
||
+ // We run this in a goroutine because newTr.Close() calls onClose()
|
||
+ // inline, which requires locking ac.mu.
|
||
+ //
|
||
// The error we pass to Close() is immaterial since there are no open
|
||
// streams at this point, so no trailers with error details will be sent
|
||
// out. We just need to pass a non-nil error.
|
||
- newTr.Close(transport.ErrConnClosing)
|
||
- if connectCtx.Err() == context.DeadlineExceeded {
|
||
- err := errors.New("failed to receive server preface within timeout")
|
||
- channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err)
|
||
- return err
|
||
- }
|
||
+ go newTr.Close(transport.ErrConnClosing)
|
||
return nil
|
||
- case <-prefaceReceived.Done():
|
||
- // We got the preface - huzzah! things are good.
|
||
- ac.mu.Lock()
|
||
- defer ac.mu.Unlock()
|
||
- if connClosed.HasFired() {
|
||
- // onClose called first; go idle but do nothing else.
|
||
- if ac.state != connectivity.Shutdown {
|
||
- ac.updateConnectivityState(connectivity.Idle, nil)
|
||
- }
|
||
- return nil
|
||
- }
|
||
- if ac.state == connectivity.Shutdown {
|
||
- // This can happen if the subConn was removed while in `Connecting`
|
||
- // state. tearDown() would have set the state to `Shutdown`, but
|
||
- // would not have closed the transport since ac.transport would not
|
||
- // been set at that point.
|
||
- //
|
||
- // We run this in a goroutine because newTr.Close() calls onClose()
|
||
- // inline, which requires locking ac.mu.
|
||
- //
|
||
- // The error we pass to Close() is immaterial since there are no open
|
||
- // streams at this point, so no trailers with error details will be sent
|
||
- // out. We just need to pass a non-nil error.
|
||
- go newTr.Close(transport.ErrConnClosing)
|
||
- return nil
|
||
- }
|
||
- ac.curAddr = addr
|
||
- ac.transport = newTr
|
||
- hcStarted = true
|
||
- ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
|
||
+ }
|
||
+ if hctx.Err() != nil {
|
||
+ // onClose was already called for this connection, but the connection
|
||
+ // was successfully established first. Consider it a success and set
|
||
+ // the new state to Idle.
|
||
+ ac.updateConnectivityState(connectivity.Idle, nil)
|
||
return nil
|
||
- case <-connClosed.Done():
|
||
- // The transport has already closed. If we received the preface, too,
|
||
- // this is not an error.
|
||
- select {
|
||
- case <-prefaceReceived.Done():
|
||
- return nil
|
||
- default:
|
||
- return errors.New("connection closed before server preface received")
|
||
- }
|
||
}
|
||
+ ac.curAddr = addr
|
||
+ ac.transport = newTr
|
||
+ ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
|
||
+ return nil
|
||
}
|
||
|
||
// startHealthCheck starts the health checking stream (RPC) to watch the health
|
||
@@ -1402,7 +1376,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
|
||
if status.Code(err) == codes.Unimplemented {
|
||
channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
|
||
} else {
|
||
- channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
|
||
+ channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err)
|
||
}
|
||
}
|
||
}()
|
||
@@ -1583,7 +1557,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
|
||
channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err)
|
||
} else {
|
||
channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
|
||
- rb = cc.getResolver(parsedTarget.Scheme)
|
||
+ rb = cc.getResolver(parsedTarget.URL.Scheme)
|
||
if rb != nil {
|
||
cc.parsedTarget = parsedTarget
|
||
return rb, nil
|
||
@@ -1604,39 +1578,26 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
|
||
return nil, err
|
||
}
|
||
channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
|
||
- rb = cc.getResolver(parsedTarget.Scheme)
|
||
+ rb = cc.getResolver(parsedTarget.URL.Scheme)
|
||
if rb == nil {
|
||
- return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme)
|
||
+ return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
|
||
}
|
||
cc.parsedTarget = parsedTarget
|
||
return rb, nil
|
||
}
|
||
|
||
// parseTarget uses RFC 3986 semantics to parse the given target into a
|
||
-// resolver.Target struct containing scheme, authority and endpoint. Query
|
||
+// resolver.Target struct containing scheme, authority and url. Query
|
||
// params are stripped from the endpoint.
|
||
func parseTarget(target string) (resolver.Target, error) {
|
||
u, err := url.Parse(target)
|
||
if err != nil {
|
||
return resolver.Target{}, err
|
||
}
|
||
- // For targets of the form "[scheme]://[authority]/endpoint, the endpoint
|
||
- // value returned from url.Parse() contains a leading "/". Although this is
|
||
- // in accordance with RFC 3986, we do not want to break existing resolver
|
||
- // implementations which expect the endpoint without the leading "/". So, we
|
||
- // end up stripping the leading "/" here. But this will result in an
|
||
- // incorrect parsing for something like "unix:///path/to/socket". Since we
|
||
- // own the "unix" resolver, we can workaround in the unix resolver by using
|
||
- // the `URL` field instead of the `Endpoint` field.
|
||
- endpoint := u.Path
|
||
- if endpoint == "" {
|
||
- endpoint = u.Opaque
|
||
- }
|
||
- endpoint = strings.TrimPrefix(endpoint, "/")
|
||
+
|
||
return resolver.Target{
|
||
Scheme: u.Scheme,
|
||
Authority: u.Host,
|
||
- Endpoint: endpoint,
|
||
URL: *u,
|
||
}, nil
|
||
}
|
||
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
|
||
index 96ff1877e754..5feac3aa0e41 100644
|
||
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
|
||
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
|
||
@@ -36,16 +36,16 @@ import (
|
||
// PerRPCCredentials defines the common interface for the credentials which need to
|
||
// attach security information to every RPC (e.g., oauth2).
|
||
type PerRPCCredentials interface {
|
||
- // GetRequestMetadata gets the current request metadata, refreshing
|
||
- // tokens if required. This should be called by the transport layer on
|
||
- // each request, and the data should be populated in headers or other
|
||
- // context. If a status code is returned, it will be used as the status
|
||
- // for the RPC. uri is the URI of the entry point for the request.
|
||
- // When supported by the underlying implementation, ctx can be used for
|
||
- // timeout and cancellation. Additionally, RequestInfo data will be
|
||
- // available via ctx to this call.
|
||
- // TODO(zhaoq): Define the set of the qualified keys instead of leaving
|
||
- // it as an arbitrary string.
|
||
+ // GetRequestMetadata gets the current request metadata, refreshing tokens
|
||
+ // if required. This should be called by the transport layer on each
|
||
+ // request, and the data should be populated in headers or other
|
||
+ // context. If a status code is returned, it will be used as the status for
|
||
+ // the RPC (restricted to an allowable set of codes as defined by gRFC
|
||
+ // A54). uri is the URI of the entry point for the request. When supported
|
||
+ // by the underlying implementation, ctx can be used for timeout and
|
||
+ // cancellation. Additionally, RequestInfo data will be available via ctx
|
||
+ // to this call. TODO(zhaoq): Define the set of the qualified keys instead
|
||
+ // of leaving it as an arbitrary string.
|
||
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
|
||
// RequireTransportSecurity indicates whether the credentials requires
|
||
// transport security.
|
||
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
|
||
index 784822d0560a..877b7cd21af7 100644
|
||
--- a/vendor/google.golang.org/grpc/credentials/tls.go
|
||
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
|
||
@@ -23,9 +23,9 @@ import (
|
||
"crypto/tls"
|
||
"crypto/x509"
|
||
"fmt"
|
||
- "io/ioutil"
|
||
"net"
|
||
"net/url"
|
||
+ "os"
|
||
|
||
credinternal "google.golang.org/grpc/internal/credentials"
|
||
)
|
||
@@ -166,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor
|
||
// it will override the virtual host name of authority (e.g. :authority header
|
||
// field) in requests.
|
||
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
|
||
- b, err := ioutil.ReadFile(certFile)
|
||
+ b, err := os.ReadFile(certFile)
|
||
if err != nil {
|
||
return nil, err
|
||
}
|
||
@@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
|
||
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
|
||
// from GetSecurityValue(), containing security info like cipher and certificate used.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
|
||
index 9372dc322e80..4866da101c60 100644
|
||
--- a/vendor/google.golang.org/grpc/dialoptions.go
|
||
+++ b/vendor/google.golang.org/grpc/dialoptions.go
|
||
@@ -44,6 +44,7 @@ func init() {
|
||
extraDialOptions = nil
|
||
}
|
||
internal.WithBinaryLogger = withBinaryLogger
|
||
+ internal.JoinDialOptions = newJoinDialOption
|
||
}
|
||
|
||
// dialOptions configure a Dial call. dialOptions are set by the DialOption
|
||
@@ -111,13 +112,28 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
|
||
}
|
||
}
|
||
|
||
+type joinDialOption struct {
|
||
+ opts []DialOption
|
||
+}
|
||
+
|
||
+func (jdo *joinDialOption) apply(do *dialOptions) {
|
||
+ for _, opt := range jdo.opts {
|
||
+ opt.apply(do)
|
||
+ }
|
||
+}
|
||
+
|
||
+func newJoinDialOption(opts ...DialOption) DialOption {
|
||
+ return &joinDialOption{opts: opts}
|
||
+}
|
||
+
|
||
// WithWriteBufferSize determines how much data can be batched before doing a
|
||
// write on the wire. The corresponding memory allocation for this buffer will
|
||
// be twice the size to keep syscalls low. The default value for this buffer is
|
||
// 32KB.
|
||
//
|
||
-// Zero will disable the write buffer such that each write will be on underlying
|
||
-// connection. Note: A Send call may not directly translate to a write.
|
||
+// Zero or negative values will disable the write buffer such that each write
|
||
+// will be on underlying connection. Note: A Send call may not directly
|
||
+// translate to a write.
|
||
func WithWriteBufferSize(s int) DialOption {
|
||
return newFuncDialOption(func(o *dialOptions) {
|
||
o.copts.WriteBufferSize = s
|
||
@@ -127,8 +143,9 @@ func WithWriteBufferSize(s int) DialOption {
|
||
// WithReadBufferSize lets you set the size of read buffer, this determines how
|
||
// much data can be read at most for each read syscall.
|
||
//
|
||
-// The default value for this buffer is 32KB. Zero will disable read buffer for
|
||
-// a connection so data framer can access the underlying conn directly.
|
||
+// The default value for this buffer is 32KB. Zero or negative values will
|
||
+// disable read buffer for a connection so data framer can access the
|
||
+// underlying conn directly.
|
||
func WithReadBufferSize(s int) DialOption {
|
||
return newFuncDialOption(func(o *dialOptions) {
|
||
o.copts.ReadBufferSize = s
|
||
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
|
||
index 18e530fc9024..07a5861352a6 100644
|
||
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
|
||
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
|
||
@@ -19,7 +19,7 @@
|
||
// Package encoding defines the interface for the compressor and codec, and
|
||
// functions to register and retrieve compressors and codecs.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -28,6 +28,8 @@ package encoding
|
||
import (
|
||
"io"
|
||
"strings"
|
||
+
|
||
+ "google.golang.org/grpc/internal/grpcutil"
|
||
)
|
||
|
||
// Identity specifies the optional encoding for uncompressed streams.
|
||
@@ -73,6 +75,9 @@ var registeredCompressor = make(map[string]Compressor)
|
||
// registered with the same name, the one registered last will take effect.
|
||
func RegisterCompressor(c Compressor) {
|
||
registeredCompressor[c.Name()] = c
|
||
+ if !grpcutil.IsCompressorNameRegistered(c.Name()) {
|
||
+ grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
|
||
+ }
|
||
}
|
||
|
||
// GetCompressor returns Compressor for the given compressor name.
|
||
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
|
||
index 7c1f66409034..5de66e40d365 100644
|
||
--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go
|
||
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
|
||
@@ -22,7 +22,6 @@ import (
|
||
"encoding/json"
|
||
"fmt"
|
||
"io"
|
||
- "io/ioutil"
|
||
"log"
|
||
"os"
|
||
"strconv"
|
||
@@ -140,9 +139,9 @@ func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config)
|
||
// newLoggerV2 creates a loggerV2 to be used as default logger.
|
||
// All logs are written to stderr.
|
||
func newLoggerV2() LoggerV2 {
|
||
- errorW := ioutil.Discard
|
||
- warningW := ioutil.Discard
|
||
- infoW := ioutil.Discard
|
||
+ errorW := io.Discard
|
||
+ warningW := io.Discard
|
||
+ infoW := io.Discard
|
||
|
||
logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
|
||
switch logLevel {
|
||
@@ -242,7 +241,7 @@ func (g *loggerT) V(l int) bool {
|
||
// DepthLoggerV2, the below functions will be called with the appropriate stack
|
||
// depth set for trivial functions the logger may ignore.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
|
||
index c5579e65065f..f9e80e27ab68 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
|
||
@@ -30,15 +30,15 @@ import (
|
||
// to build a new logger and assign it to binarylog.Logger.
|
||
//
|
||
// Example filter config strings:
|
||
-// - "" Nothing will be logged
|
||
-// - "*" All headers and messages will be fully logged.
|
||
-// - "*{h}" Only headers will be logged.
|
||
-// - "*{m:256}" Only the first 256 bytes of each message will be logged.
|
||
-// - "Foo/*" Logs every method in service Foo
|
||
-// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
|
||
-// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
|
||
-// /Foo/Bar, logs all headers and messages in every other method in service
|
||
-// Foo.
|
||
+// - "" Nothing will be logged
|
||
+// - "*" All headers and messages will be fully logged.
|
||
+// - "*{h}" Only headers will be logged.
|
||
+// - "*{m:256}" Only the first 256 bytes of each message will be logged.
|
||
+// - "Foo/*" Logs every method in service Foo
|
||
+// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
|
||
+// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
|
||
+// /Foo/Bar, logs all headers and messages in every other method in service
|
||
+// Foo.
|
||
//
|
||
// If two configs exist for one certain method or service, the one specified
|
||
// later overrides the previous config.
|
||
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
|
||
index 179f4a26d135..d71e441778f4 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
|
||
@@ -26,7 +26,7 @@ import (
|
||
|
||
"github.com/golang/protobuf/proto"
|
||
"github.com/golang/protobuf/ptypes"
|
||
- pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||
+ binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||
"google.golang.org/grpc/metadata"
|
||
"google.golang.org/grpc/status"
|
||
)
|
||
@@ -79,7 +79,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
|
||
// Build is an internal only method for building the proto message out of the
|
||
// input event. It's made public to enable other library to reuse as much logic
|
||
// in TruncatingMethodLogger as possible.
|
||
-func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
|
||
+func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
|
||
m := c.toProto()
|
||
timestamp, _ := ptypes.TimestampProto(time.Now())
|
||
m.Timestamp = timestamp
|
||
@@ -87,11 +87,11 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
|
||
m.SequenceIdWithinCall = ml.idWithinCallGen.next()
|
||
|
||
switch pay := m.Payload.(type) {
|
||
- case *pb.GrpcLogEntry_ClientHeader:
|
||
+ case *binlogpb.GrpcLogEntry_ClientHeader:
|
||
m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
|
||
- case *pb.GrpcLogEntry_ServerHeader:
|
||
+ case *binlogpb.GrpcLogEntry_ServerHeader:
|
||
m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
|
||
- case *pb.GrpcLogEntry_Message:
|
||
+ case *binlogpb.GrpcLogEntry_Message:
|
||
m.PayloadTruncated = ml.truncateMessage(pay.Message)
|
||
}
|
||
return m
|
||
@@ -102,7 +102,7 @@ func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) {
|
||
ml.sink.Write(ml.Build(c))
|
||
}
|
||
|
||
-func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
|
||
+func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
|
||
if ml.headerMaxLen == maxUInt {
|
||
return false
|
||
}
|
||
@@ -121,7 +121,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated
|
||
// but not counted towards the size limit.
|
||
continue
|
||
}
|
||
- currentEntryLen := uint64(len(entry.Value))
|
||
+ currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
|
||
if currentEntryLen > bytesLimit {
|
||
break
|
||
}
|
||
@@ -132,7 +132,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated
|
||
return truncated
|
||
}
|
||
|
||
-func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
|
||
+func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
|
||
if ml.messageMaxLen == maxUInt {
|
||
return false
|
||
}
|
||
@@ -145,7 +145,7 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated
|
||
|
||
// LogEntryConfig represents the configuration for binary log entry.
|
||
type LogEntryConfig interface {
|
||
- toProto() *pb.GrpcLogEntry
|
||
+ toProto() *binlogpb.GrpcLogEntry
|
||
}
|
||
|
||
// ClientHeader configs the binary log entry to be a ClientHeader entry.
|
||
@@ -159,10 +159,10 @@ type ClientHeader struct {
|
||
PeerAddr net.Addr
|
||
}
|
||
|
||
-func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
|
||
+func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
|
||
// This function doesn't need to set all the fields (e.g. seq ID). The Log
|
||
// function will set the fields when necessary.
|
||
- clientHeader := &pb.ClientHeader{
|
||
+ clientHeader := &binlogpb.ClientHeader{
|
||
Metadata: mdToMetadataProto(c.Header),
|
||
MethodName: c.MethodName,
|
||
Authority: c.Authority,
|
||
@@ -170,16 +170,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
|
||
if c.Timeout > 0 {
|
||
clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
|
||
}
|
||
- ret := &pb.GrpcLogEntry{
|
||
- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
|
||
- Payload: &pb.GrpcLogEntry_ClientHeader{
|
||
+ ret := &binlogpb.GrpcLogEntry{
|
||
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
|
||
+ Payload: &binlogpb.GrpcLogEntry_ClientHeader{
|
||
ClientHeader: clientHeader,
|
||
},
|
||
}
|
||
if c.OnClientSide {
|
||
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||
} else {
|
||
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||
}
|
||
if c.PeerAddr != nil {
|
||
ret.Peer = addrToProto(c.PeerAddr)
|
||
@@ -195,19 +195,19 @@ type ServerHeader struct {
|
||
PeerAddr net.Addr
|
||
}
|
||
|
||
-func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
|
||
- ret := &pb.GrpcLogEntry{
|
||
- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
|
||
- Payload: &pb.GrpcLogEntry_ServerHeader{
|
||
- ServerHeader: &pb.ServerHeader{
|
||
+func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
|
||
+ ret := &binlogpb.GrpcLogEntry{
|
||
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
|
||
+ Payload: &binlogpb.GrpcLogEntry_ServerHeader{
|
||
+ ServerHeader: &binlogpb.ServerHeader{
|
||
Metadata: mdToMetadataProto(c.Header),
|
||
},
|
||
},
|
||
}
|
||
if c.OnClientSide {
|
||
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||
} else {
|
||
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||
}
|
||
if c.PeerAddr != nil {
|
||
ret.Peer = addrToProto(c.PeerAddr)
|
||
@@ -223,7 +223,7 @@ type ClientMessage struct {
|
||
Message interface{}
|
||
}
|
||
|
||
-func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
|
||
+func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
|
||
var (
|
||
data []byte
|
||
err error
|
||
@@ -238,19 +238,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
|
||
} else {
|
||
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
||
}
|
||
- ret := &pb.GrpcLogEntry{
|
||
- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
|
||
- Payload: &pb.GrpcLogEntry_Message{
|
||
- Message: &pb.Message{
|
||
+ ret := &binlogpb.GrpcLogEntry{
|
||
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
|
||
+ Payload: &binlogpb.GrpcLogEntry_Message{
|
||
+ Message: &binlogpb.Message{
|
||
Length: uint32(len(data)),
|
||
Data: data,
|
||
},
|
||
},
|
||
}
|
||
if c.OnClientSide {
|
||
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||
} else {
|
||
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||
}
|
||
return ret
|
||
}
|
||
@@ -263,7 +263,7 @@ type ServerMessage struct {
|
||
Message interface{}
|
||
}
|
||
|
||
-func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
|
||
+func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
|
||
var (
|
||
data []byte
|
||
err error
|
||
@@ -278,19 +278,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
|
||
} else {
|
||
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
||
}
|
||
- ret := &pb.GrpcLogEntry{
|
||
- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
|
||
- Payload: &pb.GrpcLogEntry_Message{
|
||
- Message: &pb.Message{
|
||
+ ret := &binlogpb.GrpcLogEntry{
|
||
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
|
||
+ Payload: &binlogpb.GrpcLogEntry_Message{
|
||
+ Message: &binlogpb.Message{
|
||
Length: uint32(len(data)),
|
||
Data: data,
|
||
},
|
||
},
|
||
}
|
||
if c.OnClientSide {
|
||
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||
} else {
|
||
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||
}
|
||
return ret
|
||
}
|
||
@@ -300,15 +300,15 @@ type ClientHalfClose struct {
|
||
OnClientSide bool
|
||
}
|
||
|
||
-func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
|
||
- ret := &pb.GrpcLogEntry{
|
||
- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
|
||
+func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
|
||
+ ret := &binlogpb.GrpcLogEntry{
|
||
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
|
||
Payload: nil, // No payload here.
|
||
}
|
||
if c.OnClientSide {
|
||
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||
} else {
|
||
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||
}
|
||
return ret
|
||
}
|
||
@@ -324,7 +324,7 @@ type ServerTrailer struct {
|
||
PeerAddr net.Addr
|
||
}
|
||
|
||
-func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
||
+func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
|
||
st, ok := status.FromError(c.Err)
|
||
if !ok {
|
||
grpclogLogger.Info("binarylogging: error in trailer is not a status error")
|
||
@@ -340,10 +340,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
||
grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
|
||
}
|
||
}
|
||
- ret := &pb.GrpcLogEntry{
|
||
- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
|
||
- Payload: &pb.GrpcLogEntry_Trailer{
|
||
- Trailer: &pb.Trailer{
|
||
+ ret := &binlogpb.GrpcLogEntry{
|
||
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
|
||
+ Payload: &binlogpb.GrpcLogEntry_Trailer{
|
||
+ Trailer: &binlogpb.Trailer{
|
||
Metadata: mdToMetadataProto(c.Trailer),
|
||
StatusCode: uint32(st.Code()),
|
||
StatusMessage: st.Message(),
|
||
@@ -352,9 +352,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
||
},
|
||
}
|
||
if c.OnClientSide {
|
||
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||
} else {
|
||
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||
}
|
||
if c.PeerAddr != nil {
|
||
ret.Peer = addrToProto(c.PeerAddr)
|
||
@@ -367,15 +367,15 @@ type Cancel struct {
|
||
OnClientSide bool
|
||
}
|
||
|
||
-func (c *Cancel) toProto() *pb.GrpcLogEntry {
|
||
- ret := &pb.GrpcLogEntry{
|
||
- Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
|
||
+func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
|
||
+ ret := &binlogpb.GrpcLogEntry{
|
||
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
|
||
Payload: nil,
|
||
}
|
||
if c.OnClientSide {
|
||
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||
} else {
|
||
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||
}
|
||
return ret
|
||
}
|
||
@@ -392,15 +392,15 @@ func metadataKeyOmit(key string) bool {
|
||
return strings.HasPrefix(key, "grpc-")
|
||
}
|
||
|
||
-func mdToMetadataProto(md metadata.MD) *pb.Metadata {
|
||
- ret := &pb.Metadata{}
|
||
+func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
|
||
+ ret := &binlogpb.Metadata{}
|
||
for k, vv := range md {
|
||
if metadataKeyOmit(k) {
|
||
continue
|
||
}
|
||
for _, v := range vv {
|
||
ret.Entry = append(ret.Entry,
|
||
- &pb.MetadataEntry{
|
||
+ &binlogpb.MetadataEntry{
|
||
Key: k,
|
||
Value: []byte(v),
|
||
},
|
||
@@ -410,26 +410,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata {
|
||
return ret
|
||
}
|
||
|
||
-func addrToProto(addr net.Addr) *pb.Address {
|
||
- ret := &pb.Address{}
|
||
+func addrToProto(addr net.Addr) *binlogpb.Address {
|
||
+ ret := &binlogpb.Address{}
|
||
switch a := addr.(type) {
|
||
case *net.TCPAddr:
|
||
if a.IP.To4() != nil {
|
||
- ret.Type = pb.Address_TYPE_IPV4
|
||
+ ret.Type = binlogpb.Address_TYPE_IPV4
|
||
} else if a.IP.To16() != nil {
|
||
- ret.Type = pb.Address_TYPE_IPV6
|
||
+ ret.Type = binlogpb.Address_TYPE_IPV6
|
||
} else {
|
||
- ret.Type = pb.Address_TYPE_UNKNOWN
|
||
+ ret.Type = binlogpb.Address_TYPE_UNKNOWN
|
||
// Do not set address and port fields.
|
||
break
|
||
}
|
||
ret.Address = a.IP.String()
|
||
ret.IpPort = uint32(a.Port)
|
||
case *net.UnixAddr:
|
||
- ret.Type = pb.Address_TYPE_UNIX
|
||
+ ret.Type = binlogpb.Address_TYPE_UNIX
|
||
ret.Address = a.String()
|
||
default:
|
||
- ret.Type = pb.Address_TYPE_UNKNOWN
|
||
+ ret.Type = binlogpb.Address_TYPE_UNKNOWN
|
||
}
|
||
return ret
|
||
}
|
||
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
|
||
index c2fdd58b3198..264de387c2a5 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
|
||
@@ -26,7 +26,7 @@ import (
|
||
"time"
|
||
|
||
"github.com/golang/protobuf/proto"
|
||
- pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||
+ binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||
)
|
||
|
||
var (
|
||
@@ -42,15 +42,15 @@ type Sink interface {
|
||
// Write will be called to write the log entry into the sink.
|
||
//
|
||
// It should be thread-safe so it can be called in parallel.
|
||
- Write(*pb.GrpcLogEntry) error
|
||
+ Write(*binlogpb.GrpcLogEntry) error
|
||
// Close will be called when the Sink is replaced by a new Sink.
|
||
Close() error
|
||
}
|
||
|
||
type noopSink struct{}
|
||
|
||
-func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
|
||
-func (ns *noopSink) Close() error { return nil }
|
||
+func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
|
||
+func (ns *noopSink) Close() error { return nil }
|
||
|
||
// newWriterSink creates a binary log sink with the given writer.
|
||
//
|
||
@@ -66,7 +66,7 @@ type writerSink struct {
|
||
out io.Writer
|
||
}
|
||
|
||
-func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
|
||
+func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
|
||
b, err := proto.Marshal(e)
|
||
if err != nil {
|
||
grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
|
||
@@ -96,7 +96,7 @@ type bufferedSink struct {
|
||
done chan struct{}
|
||
}
|
||
|
||
-func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
|
||
+func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
|
||
fs.mu.Lock()
|
||
defer fs.mu.Unlock()
|
||
if !fs.flusherStarted {
|
||
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
|
||
index ad0ce4dabf06..7b2f350e2e64 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/channelz/types.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/channelz/types.go
|
||
@@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) {
|
||
|
||
// deleteSelfIfReady tries to delete the channel itself from the channelz database.
|
||
// The delete process includes two steps:
|
||
-// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
|
||
-// parent's child list.
|
||
-// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
|
||
-// will return entry not found error.
|
||
+// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
|
||
+// parent's child list.
|
||
+// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
|
||
+// will return entry not found error.
|
||
func (c *channel) deleteSelfIfReady() {
|
||
if !c.deleteSelfFromTree() {
|
||
return
|
||
@@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) {
|
||
|
||
// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
|
||
// The delete process includes two steps:
|
||
-// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
|
||
-// its parent's child list.
|
||
-// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
|
||
-// by id will return entry not found error.
|
||
+// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
|
||
+// its parent's child list.
|
||
+// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
|
||
+// by id will return entry not found error.
|
||
func (sc *subChannel) deleteSelfIfReady() {
|
||
if !sc.deleteSelfFromTree() {
|
||
return
|
||
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
|
||
index 6f0272543110..5ba9d94d49c2 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
|
||
@@ -21,15 +21,42 @@ package envconfig
|
||
|
||
import (
|
||
"os"
|
||
+ "strconv"
|
||
"strings"
|
||
)
|
||
|
||
-const (
|
||
- prefix = "GRPC_GO_"
|
||
- txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
|
||
-)
|
||
-
|
||
var (
|
||
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
|
||
- TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
|
||
+ TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
|
||
+ // AdvertiseCompressors is set if registered compressor should be advertised
|
||
+ // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
|
||
+ AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
|
||
+ // RingHashCap indicates the maximum ring size which defaults to 4096
|
||
+ // entries but may be overridden by setting the environment variable
|
||
+ // "GRPC_RING_HASH_CAP". This does not override the default bounds
|
||
+ // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
|
||
+ RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
|
||
)
|
||
+
|
||
+func boolFromEnv(envVar string, def bool) bool {
|
||
+ if def {
|
||
+ // The default is true; return true unless the variable is "false".
|
||
+ return !strings.EqualFold(os.Getenv(envVar), "false")
|
||
+ }
|
||
+ // The default is false; return false unless the variable is "true".
|
||
+ return strings.EqualFold(os.Getenv(envVar), "true")
|
||
+}
|
||
+
|
||
+func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
|
||
+ v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
|
||
+ if err != nil {
|
||
+ return def
|
||
+ }
|
||
+ if v < min {
|
||
+ return min
|
||
+ }
|
||
+ if v > max {
|
||
+ return max
|
||
+ }
|
||
+ return v
|
||
+}
|
||
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
|
||
index af09711a3e88..04136882c7bc 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
|
||
@@ -20,7 +20,6 @@ package envconfig
|
||
|
||
import (
|
||
"os"
|
||
- "strings"
|
||
)
|
||
|
||
const (
|
||
@@ -36,16 +35,6 @@ const (
|
||
//
|
||
// When both bootstrap FileName and FileContent are set, FileName is used.
|
||
XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
|
||
-
|
||
- ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH"
|
||
- clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
|
||
- aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
|
||
- rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC"
|
||
- outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"
|
||
- federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
|
||
- rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
|
||
-
|
||
- c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
|
||
)
|
||
|
||
var (
|
||
@@ -64,38 +53,40 @@ var (
|
||
// XDSRingHash indicates whether ring hash support is enabled, which can be
|
||
// disabled by setting the environment variable
|
||
// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
|
||
- XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false")
|
||
+ XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
|
||
// XDSClientSideSecurity is used to control processing of security
|
||
// configuration on the client-side.
|
||
//
|
||
// Note that there is no env var protection for the server-side because we
|
||
// have a brand new API on the server-side and users explicitly need to use
|
||
// the new API to get security integration on the server.
|
||
- XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false")
|
||
+ XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
|
||
// XDSAggregateAndDNS indicates whether processing of aggregated cluster
|
||
// and DNS cluster is enabled, which can be enabled by setting the
|
||
// environment variable
|
||
// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
|
||
// "true".
|
||
- XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false")
|
||
+ XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)
|
||
|
||
// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
|
||
// which can be disabled by setting the environment variable
|
||
// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
|
||
- XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
|
||
+ XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
|
||
// XDSOutlierDetection indicates whether outlier detection support is
|
||
// enabled, which can be disabled by setting the environment variable
|
||
// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
|
||
- XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false")
|
||
- // XDSFederation indicates whether federation support is enabled.
|
||
- XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
|
||
+ XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
|
||
+ // XDSFederation indicates whether federation support is enabled, which can
|
||
+ // be enabled by setting the environment variable
|
||
+ // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
|
||
+ XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false)
|
||
|
||
// XDSRLS indicates whether processing of Cluster Specifier plugins and
|
||
// support for the RLS CLuster Specifier is enabled, which can be enabled by
|
||
// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
|
||
// "true".
|
||
- XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true")
|
||
+ XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false)
|
||
|
||
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
|
||
- C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
|
||
+ C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
|
||
)
|
||
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
|
||
index 30a3b4258fc0..b68e26a36493 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
|
||
@@ -110,7 +110,7 @@ type LoggerV2 interface {
|
||
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
|
||
// It is defined here to avoid a circular dependency.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
|
||
new file mode 100644
|
||
index 000000000000..6635f7bca96d
|
||
--- /dev/null
|
||
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
|
||
@@ -0,0 +1,32 @@
|
||
+/*
|
||
+ *
|
||
+ * Copyright 2022 gRPC authors.
|
||
+ *
|
||
+ * Licensed under the Apache License, Version 2.0 (the "License");
|
||
+ * you may not use this file except in compliance with the License.
|
||
+ * You may obtain a copy of the License at
|
||
+ *
|
||
+ * http://www.apache.org/licenses/LICENSE-2.0
|
||
+ *
|
||
+ * Unless required by applicable law or agreed to in writing, software
|
||
+ * distributed under the License is distributed on an "AS IS" BASIS,
|
||
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+ * See the License for the specific language governing permissions and
|
||
+ * limitations under the License.
|
||
+ *
|
||
+ */
|
||
+
|
||
+package grpcsync
|
||
+
|
||
+import (
|
||
+ "sync"
|
||
+)
|
||
+
|
||
+// OnceFunc returns a function wrapping f which ensures f is only executed
|
||
+// once even if the returned function is executed multiple times.
|
||
+func OnceFunc(f func()) func() {
|
||
+ var once sync.Once
|
||
+ return func() {
|
||
+ once.Do(f)
|
||
+ }
|
||
+}
|
||
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
|
||
new file mode 100644
|
||
index 000000000000..9f4090967980
|
||
--- /dev/null
|
||
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
|
||
@@ -0,0 +1,47 @@
|
||
+/*
|
||
+ *
|
||
+ * Copyright 2022 gRPC authors.
|
||
+ *
|
||
+ * Licensed under the Apache License, Version 2.0 (the "License");
|
||
+ * you may not use this file except in compliance with the License.
|
||
+ * You may obtain a copy of the License at
|
||
+ *
|
||
+ * http://www.apache.org/licenses/LICENSE-2.0
|
||
+ *
|
||
+ * Unless required by applicable law or agreed to in writing, software
|
||
+ * distributed under the License is distributed on an "AS IS" BASIS,
|
||
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||
+ * See the License for the specific language governing permissions and
|
||
+ * limitations under the License.
|
||
+ *
|
||
+ */
|
||
+
|
||
+package grpcutil
|
||
+
|
||
+import (
|
||
+ "strings"
|
||
+
|
||
+ "google.golang.org/grpc/internal/envconfig"
|
||
+)
|
||
+
|
||
+// RegisteredCompressorNames holds names of the registered compressors.
|
||
+var RegisteredCompressorNames []string
|
||
+
|
||
+// IsCompressorNameRegistered returns true when name is available in registry.
|
||
+func IsCompressorNameRegistered(name string) bool {
|
||
+ for _, compressor := range RegisteredCompressorNames {
|
||
+ if compressor == name {
|
||
+ return true
|
||
+ }
|
||
+ }
|
||
+ return false
|
||
+}
|
||
+
|
||
+// RegisteredCompressors returns a string of registered compressor names
|
||
+// separated by comma.
|
||
+func RegisteredCompressors() string {
|
||
+ if !envconfig.AdvertiseCompressors {
|
||
+ return ""
|
||
+ }
|
||
+ return strings.Join(RegisteredCompressorNames, ",")
|
||
+}
|
||
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
|
||
index e9c4af64830c..ec62b4775e5b 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
|
||
@@ -25,7 +25,6 @@ import (
|
||
|
||
// ParseMethod splits service and method from the input. It expects format
|
||
// "/service/method".
|
||
-//
|
||
func ParseMethod(methodName string) (service, method string, _ error) {
|
||
if !strings.HasPrefix(methodName, "/") {
|
||
return "", "", errors.New("invalid method name: should start with /")
|
||
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
|
||
index fd0ee3dcaf1e..0a76d9de6e02 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/internal.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/internal.go
|
||
@@ -77,6 +77,9 @@ var (
|
||
// ClearGlobalDialOptions clears the array of extra DialOption. This
|
||
// method is useful in testing and benchmarking.
|
||
ClearGlobalDialOptions func()
|
||
+ // JoinDialOptions combines the dial options passed as arguments into a
|
||
+ // single dial option.
|
||
+ JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption
|
||
// JoinServerOptions combines the server options passed as arguments into a
|
||
// single server option.
|
||
JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption
|
||
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
|
||
index 75301c514913..09a667f33cb0 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
|
||
@@ -116,7 +116,7 @@ type dnsBuilder struct{}
|
||
|
||
// Build creates and starts a DNS resolver that watches the name resolution of the target.
|
||
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
||
- host, port, err := parseTarget(target.Endpoint, defaultPort)
|
||
+ host, port, err := parseTarget(target.Endpoint(), defaultPort)
|
||
if err != nil {
|
||
return nil, err
|
||
}
|
||
@@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
|
||
disableServiceConfig: opts.DisableServiceConfig,
|
||
}
|
||
|
||
- if target.Authority == "" {
|
||
+ if target.URL.Host == "" {
|
||
d.resolver = defaultResolver
|
||
} else {
|
||
- d.resolver, err = customAuthorityResolver(target.Authority)
|
||
+ d.resolver, err = customAuthorityResolver(target.URL.Host)
|
||
if err != nil {
|
||
return nil, err
|
||
}
|
||
diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
|
||
index 520d9229e1ed..afac56572ad5 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
|
||
@@ -20,13 +20,20 @@
|
||
// name without scheme back to gRPC as resolved address.
|
||
package passthrough
|
||
|
||
-import "google.golang.org/grpc/resolver"
|
||
+import (
|
||
+ "errors"
|
||
+
|
||
+ "google.golang.org/grpc/resolver"
|
||
+)
|
||
|
||
const scheme = "passthrough"
|
||
|
||
type passthroughBuilder struct{}
|
||
|
||
func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
||
+ if target.Endpoint() == "" && opts.Dialer == nil {
|
||
+ return nil, errors.New("passthrough: received empty target in Build()")
|
||
+ }
|
||
r := &passthroughResolver{
|
||
target: target,
|
||
cc: cc,
|
||
@@ -45,7 +52,7 @@ type passthroughResolver struct {
|
||
}
|
||
|
||
func (r *passthroughResolver) start() {
|
||
- r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
|
||
+ r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
|
||
}
|
||
|
||
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
|
||
diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
|
||
index 7f1a702cacbe..160911687738 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
|
||
@@ -34,8 +34,8 @@ type builder struct {
|
||
}
|
||
|
||
func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
|
||
- if target.Authority != "" {
|
||
- return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority)
|
||
+ if target.URL.Host != "" {
|
||
+ return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host)
|
||
}
|
||
|
||
// gRPC was parsing the dial target manually before PR #4817, and we
|
||
diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
|
||
index badbdbf597f3..51e733e495a3 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
|
||
@@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
|
||
// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
|
||
// config. This method iterates through that list in order, and stops at the
|
||
// first policy that is supported.
|
||
-// - If the config for the first supported policy is invalid, the whole service
|
||
-// config is invalid.
|
||
-// - If the list doesn't contain any supported policy, the whole service config
|
||
-// is invalid.
|
||
+// - If the config for the first supported policy is invalid, the whole service
|
||
+// config is invalid.
|
||
+// - If the list doesn't contain any supported policy, the whole service config
|
||
+// is invalid.
|
||
func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
|
||
var ir intermediateBalancerConfig
|
||
err := json.Unmarshal(b, &ir)
|
||
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
|
||
index e5c6513edd13..b0ead4f54f82 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/status/status.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
|
||
@@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool {
|
||
}
|
||
return proto.Equal(e.s.s, tse.s.s)
|
||
}
|
||
+
|
||
+// IsRestrictedControlPlaneCode returns whether the status includes a code
|
||
+// restricted for control plane usage as defined by gRFC A54.
|
||
+func IsRestrictedControlPlaneCode(s *Status) bool {
|
||
+ switch s.Code() {
|
||
+ case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
|
||
+ return true
|
||
+ }
|
||
+ return false
|
||
+}
|
||
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
|
||
index 409769f48fdc..9097385e1a6a 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
|
||
@@ -191,7 +191,7 @@ type goAway struct {
|
||
code http2.ErrCode
|
||
debugData []byte
|
||
headsUp bool
|
||
- closeConn bool
|
||
+ closeConn error // if set, loopyWriter will exit, resulting in conn closure
|
||
}
|
||
|
||
func (*goAway) isTransportResponseFrame() bool { return false }
|
||
@@ -209,6 +209,14 @@ type outFlowControlSizeRequest struct {
|
||
|
||
func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
|
||
|
||
+// closeConnection is an instruction to tell the loopy writer to flush the
|
||
+// framer and exit, which will cause the transport's connection to be closed
|
||
+// (by the client or server). The transport itself will close after the reader
|
||
+// encounters the EOF caused by the connection closure.
|
||
+type closeConnection struct{}
|
||
+
|
||
+func (closeConnection) isTransportResponseFrame() bool { return false }
|
||
+
|
||
type outStreamState int
|
||
|
||
const (
|
||
@@ -408,7 +416,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
|
||
select {
|
||
case <-c.ch:
|
||
case <-c.done:
|
||
- return nil, ErrConnClosing
|
||
+ return nil, errors.New("transport closed by client")
|
||
}
|
||
}
|
||
}
|
||
@@ -519,18 +527,9 @@ const minBatchSize = 1000
|
||
// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
|
||
// if the batch size is too low to give stream goroutines a chance to fill it up.
|
||
func (l *loopyWriter) run() (err error) {
|
||
- defer func() {
|
||
- if err == ErrConnClosing {
|
||
- // Don't log ErrConnClosing as error since it happens
|
||
- // 1. When the connection is closed by some other known issue.
|
||
- // 2. User closed the connection.
|
||
- // 3. A graceful close of connection.
|
||
- if logger.V(logLevel) {
|
||
- logger.Infof("transport: loopyWriter.run returning. %v", err)
|
||
- }
|
||
- err = nil
|
||
- }
|
||
- }()
|
||
+ // Always flush the writer before exiting in case there are pending frames
|
||
+ // to be sent.
|
||
+ defer l.framer.writer.Flush()
|
||
for {
|
||
it, err := l.cbuf.get(true)
|
||
if err != nil {
|
||
@@ -574,7 +573,6 @@ func (l *loopyWriter) run() (err error) {
|
||
}
|
||
l.framer.writer.Flush()
|
||
break hasdata
|
||
-
|
||
}
|
||
}
|
||
}
|
||
@@ -655,19 +653,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
||
itl: &itemList{},
|
||
wq: h.wq,
|
||
}
|
||
- str.itl.enqueue(h)
|
||
- return l.originateStream(str)
|
||
+ return l.originateStream(str, h)
|
||
}
|
||
|
||
-func (l *loopyWriter) originateStream(str *outStream) error {
|
||
- hdr := str.itl.dequeue().(*headerFrame)
|
||
- if err := hdr.initStream(str.id); err != nil {
|
||
- if err == ErrConnClosing {
|
||
- return err
|
||
- }
|
||
- // Other errors(errStreamDrain) need not close transport.
|
||
+func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
|
||
+ // l.draining is set when handling GoAway. In which case, we want to avoid
|
||
+ // creating new streams.
|
||
+ if l.draining {
|
||
+ // TODO: provide a better error with the reason we are in draining.
|
||
+ hdr.onOrphaned(errStreamDrain)
|
||
return nil
|
||
}
|
||
+ if err := hdr.initStream(str.id); err != nil {
|
||
+ return err
|
||
+ }
|
||
if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
|
||
return err
|
||
}
|
||
@@ -763,8 +762,8 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
||
return err
|
||
}
|
||
}
|
||
- if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
|
||
- return ErrConnClosing
|
||
+ if l.draining && len(l.estdStreams) == 0 {
|
||
+ return errors.New("finished processing active streams while in draining mode")
|
||
}
|
||
return nil
|
||
}
|
||
@@ -799,7 +798,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
|
||
if l.side == clientSide {
|
||
l.draining = true
|
||
if len(l.estdStreams) == 0 {
|
||
- return ErrConnClosing
|
||
+ return errors.New("received GOAWAY with no active streams")
|
||
}
|
||
}
|
||
return nil
|
||
@@ -817,6 +816,13 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error {
|
||
return nil
|
||
}
|
||
|
||
+func (l *loopyWriter) closeConnectionHandler() error {
|
||
+ // Exit loopyWriter entirely by returning an error here. This will lead to
|
||
+ // the transport closing the connection, and, ultimately, transport
|
||
+ // closure.
|
||
+ return ErrConnClosing
|
||
+}
|
||
+
|
||
func (l *loopyWriter) handle(i interface{}) error {
|
||
switch i := i.(type) {
|
||
case *incomingWindowUpdate:
|
||
@@ -845,6 +851,8 @@ func (l *loopyWriter) handle(i interface{}) error {
|
||
return l.goAwayHandler(i)
|
||
case *outFlowControlSizeRequest:
|
||
return l.outFlowControlSizeRequestHandler(i)
|
||
+ case closeConnection:
|
||
+ return l.closeConnectionHandler()
|
||
default:
|
||
return fmt.Errorf("transport: unknown control message type %T", i)
|
||
}
|
||
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
|
||
index 9fa306b2e07a..bc8ee0747496 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/transport/defaults.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go
|
||
@@ -47,3 +47,9 @@ const (
|
||
defaultClientMaxHeaderListSize = uint32(16 << 20)
|
||
defaultServerMaxHeaderListSize = uint32(16 << 20)
|
||
)
|
||
+
|
||
+// MaxStreamID is the upper bound for the stream ID before the current
|
||
+// transport gracefully closes and new transport is created for subsequent RPCs.
|
||
+// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit
|
||
+// integer. It's exported so that tests can override it.
|
||
+var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
|
||
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
|
||
index 090120925bb4..e6626bf96e7c 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
|
||
@@ -46,24 +46,32 @@ import (
|
||
"google.golang.org/grpc/status"
|
||
)
|
||
|
||
-// NewServerHandlerTransport returns a ServerTransport handling gRPC
|
||
-// from inside an http.Handler. It requires that the http Server
|
||
-// supports HTTP/2.
|
||
+// NewServerHandlerTransport returns a ServerTransport handling gRPC from
|
||
+// inside an http.Handler, or writes an HTTP error to w and returns an error.
|
||
+// It requires that the http Server supports HTTP/2.
|
||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
|
||
if r.ProtoMajor != 2 {
|
||
- return nil, errors.New("gRPC requires HTTP/2")
|
||
+ msg := "gRPC requires HTTP/2"
|
||
+ http.Error(w, msg, http.StatusBadRequest)
|
||
+ return nil, errors.New(msg)
|
||
}
|
||
if r.Method != "POST" {
|
||
- return nil, errors.New("invalid gRPC request method")
|
||
+ msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
|
||
+ http.Error(w, msg, http.StatusBadRequest)
|
||
+ return nil, errors.New(msg)
|
||
}
|
||
contentType := r.Header.Get("Content-Type")
|
||
// TODO: do we assume contentType is lowercase? we did before
|
||
contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
|
||
if !validContentType {
|
||
- return nil, errors.New("invalid gRPC request content-type")
|
||
+ msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType)
|
||
+ http.Error(w, msg, http.StatusUnsupportedMediaType)
|
||
+ return nil, errors.New(msg)
|
||
}
|
||
if _, ok := w.(http.Flusher); !ok {
|
||
- return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
|
||
+ msg := "gRPC requires a ResponseWriter supporting http.Flusher"
|
||
+ http.Error(w, msg, http.StatusInternalServerError)
|
||
+ return nil, errors.New(msg)
|
||
}
|
||
|
||
st := &serverHandlerTransport{
|
||
@@ -79,7 +87,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
|
||
if v := r.Header.Get("grpc-timeout"); v != "" {
|
||
to, err := decodeTimeout(v)
|
||
if err != nil {
|
||
- return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
|
||
+ msg := fmt.Sprintf("malformed grpc-timeout: %v", err)
|
||
+ http.Error(w, msg, http.StatusBadRequest)
|
||
+ return nil, status.Error(codes.Internal, msg)
|
||
}
|
||
st.timeoutSet = true
|
||
st.timeout = to
|
||
@@ -97,7 +107,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
|
||
for _, v := range vv {
|
||
v, err := decodeMetadataHeader(k, v)
|
||
if err != nil {
|
||
- return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
|
||
+ msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err)
|
||
+ http.Error(w, msg, http.StatusBadRequest)
|
||
+ return nil, status.Error(codes.Internal, msg)
|
||
}
|
||
metakv = append(metakv, k, v)
|
||
}
|
||
@@ -141,12 +153,15 @@ type serverHandlerTransport struct {
|
||
stats []stats.Handler
|
||
}
|
||
|
||
-func (ht *serverHandlerTransport) Close() {
|
||
- ht.closeOnce.Do(ht.closeCloseChanOnce)
|
||
+func (ht *serverHandlerTransport) Close(err error) {
|
||
+ ht.closeOnce.Do(func() {
|
||
+ if logger.V(logLevel) {
|
||
+ logger.Infof("Closing serverHandlerTransport: %v", err)
|
||
+ }
|
||
+ close(ht.closedCh)
|
||
+ })
|
||
}
|
||
|
||
-func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
|
||
-
|
||
func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
|
||
|
||
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
|
||
@@ -236,7 +251,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
|
||
})
|
||
}
|
||
}
|
||
- ht.Close()
|
||
+ ht.Close(errors.New("finished writing status"))
|
||
return err
|
||
}
|
||
|
||
@@ -346,7 +361,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
|
||
case <-ht.req.Context().Done():
|
||
}
|
||
cancel()
|
||
- ht.Close()
|
||
+ ht.Close(errors.New("request is done processing"))
|
||
}()
|
||
|
||
req := ht.req
|
||
@@ -442,10 +457,10 @@ func (ht *serverHandlerTransport) Drain() {
|
||
// mapRecvMsgError returns the non-nil err into the appropriate
|
||
// error value as expected by callers of *grpc.parser.recvMsg.
|
||
// In particular, in can only be:
|
||
-// * io.EOF
|
||
-// * io.ErrUnexpectedEOF
|
||
-// * of type transport.ConnectionError
|
||
-// * an error from the status package
|
||
+// - io.EOF
|
||
+// - io.ErrUnexpectedEOF
|
||
+// - of type transport.ConnectionError
|
||
+// - an error from the status package
|
||
func mapRecvMsgError(err error) error {
|
||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||
return err
|
||
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
|
||
index 5c2f35b24e75..79ee8aea0a21 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
|
||
@@ -38,8 +38,10 @@ import (
|
||
"google.golang.org/grpc/credentials"
|
||
"google.golang.org/grpc/internal/channelz"
|
||
icredentials "google.golang.org/grpc/internal/credentials"
|
||
+ "google.golang.org/grpc/internal/grpcsync"
|
||
"google.golang.org/grpc/internal/grpcutil"
|
||
imetadata "google.golang.org/grpc/internal/metadata"
|
||
+ istatus "google.golang.org/grpc/internal/status"
|
||
"google.golang.org/grpc/internal/syscall"
|
||
"google.golang.org/grpc/internal/transport/networktype"
|
||
"google.golang.org/grpc/keepalive"
|
||
@@ -57,11 +59,15 @@ var clientConnectionCounter uint64
|
||
|
||
// http2Client implements the ClientTransport interface with HTTP2.
|
||
type http2Client struct {
|
||
- lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||
- ctx context.Context
|
||
- cancel context.CancelFunc
|
||
- ctxDone <-chan struct{} // Cache the ctx.Done() chan.
|
||
- userAgent string
|
||
+ lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||
+ ctx context.Context
|
||
+ cancel context.CancelFunc
|
||
+ ctxDone <-chan struct{} // Cache the ctx.Done() chan.
|
||
+ userAgent string
|
||
+ // address contains the resolver returned address for this transport.
|
||
+ // If the `ServerName` field is set, it takes precedence over `CallHdr.Host`
|
||
+ // passed to `NewStream`, when determining the :authority header.
|
||
+ address resolver.Address
|
||
md metadata.MD
|
||
conn net.Conn // underlying communication channel
|
||
loopy *loopyWriter
|
||
@@ -99,16 +105,13 @@ type http2Client struct {
|
||
maxSendHeaderListSize *uint32
|
||
|
||
bdpEst *bdpEstimator
|
||
- // onPrefaceReceipt is a callback that client transport calls upon
|
||
- // receiving server preface to signal that a succefull HTTP2
|
||
- // connection was established.
|
||
- onPrefaceReceipt func()
|
||
|
||
maxConcurrentStreams uint32
|
||
streamQuota int64
|
||
streamsQuotaAvailable chan struct{}
|
||
waitingStreams uint32
|
||
nextID uint32
|
||
+ registeredCompressors string
|
||
|
||
// Do not access controlBuf with mu held.
|
||
mu sync.Mutex // guard the following variables
|
||
@@ -137,8 +140,7 @@ type http2Client struct {
|
||
channelzID *channelz.Identifier
|
||
czData *channelzData
|
||
|
||
- onGoAway func(GoAwayReason)
|
||
- onClose func()
|
||
+ onClose func(GoAwayReason)
|
||
|
||
bufferPool *bufferPool
|
||
|
||
@@ -194,7 +196,7 @@ func isTemporary(err error) bool {
|
||
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
|
||
// and starts to receive messages on it. Non-nil error returns if construction
|
||
// fails.
|
||
-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
|
||
+func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
|
||
scheme := "http"
|
||
ctx, cancel := context.WithCancel(ctx)
|
||
defer func() {
|
||
@@ -214,14 +216,40 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||
if opts.FailOnNonTempDialError {
|
||
return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
|
||
}
|
||
- return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
|
||
+ return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err)
|
||
}
|
||
+
|
||
// Any further errors will close the underlying connection
|
||
defer func(conn net.Conn) {
|
||
if err != nil {
|
||
conn.Close()
|
||
}
|
||
}(conn)
|
||
+
|
||
+ // The following defer and goroutine monitor the connectCtx for cancelation
|
||
+ // and deadline. On context expiration, the connection is hard closed and
|
||
+ // this function will naturally fail as a result. Otherwise, the defer
|
||
+ // waits for the goroutine to exit to prevent the context from being
|
||
+ // monitored (and to prevent the connection from ever being closed) after
|
||
+ // returning from this function.
|
||
+ ctxMonitorDone := grpcsync.NewEvent()
|
||
+ newClientCtx, newClientDone := context.WithCancel(connectCtx)
|
||
+ defer func() {
|
||
+ newClientDone() // Awaken the goroutine below if connectCtx hasn't expired.
|
||
+ <-ctxMonitorDone.Done() // Wait for the goroutine below to exit.
|
||
+ }()
|
||
+ go func(conn net.Conn) {
|
||
+ defer ctxMonitorDone.Fire() // Signal this goroutine has exited.
|
||
+ <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes.
|
||
+ if err := connectCtx.Err(); err != nil {
|
||
+ // connectCtx expired before exiting the function. Hard close the connection.
|
||
+ if logger.V(logLevel) {
|
||
+ logger.Infof("newClientTransport: aborting due to connectCtx: %v", err)
|
||
+ }
|
||
+ conn.Close()
|
||
+ }
|
||
+ }(conn)
|
||
+
|
||
kp := opts.KeepaliveParams
|
||
// Validate keepalive parameters.
|
||
if kp.Time == 0 {
|
||
@@ -253,15 +281,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||
}
|
||
}
|
||
if transportCreds != nil {
|
||
- rawConn := conn
|
||
- // Pull the deadline from the connectCtx, which will be used for
|
||
- // timeouts in the authentication protocol handshake. Can ignore the
|
||
- // boolean as the deadline will return the zero value, which will make
|
||
- // the conn not timeout on I/O operations.
|
||
- deadline, _ := connectCtx.Deadline()
|
||
- rawConn.SetDeadline(deadline)
|
||
- conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn)
|
||
- rawConn.SetDeadline(time.Time{})
|
||
+ conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn)
|
||
if err != nil {
|
||
return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
|
||
}
|
||
@@ -299,6 +319,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||
ctxDone: ctx.Done(), // Cache Done chan.
|
||
cancel: cancel,
|
||
userAgent: opts.UserAgent,
|
||
+ registeredCompressors: grpcutil.RegisteredCompressors(),
|
||
+ address: addr,
|
||
conn: conn,
|
||
remoteAddr: conn.RemoteAddr(),
|
||
localAddr: conn.LocalAddr(),
|
||
@@ -315,16 +337,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||
kp: kp,
|
||
statsHandlers: opts.StatsHandlers,
|
||
initialWindowSize: initialWindowSize,
|
||
- onPrefaceReceipt: onPrefaceReceipt,
|
||
nextID: 1,
|
||
maxConcurrentStreams: defaultMaxStreamsClient,
|
||
streamQuota: defaultMaxStreamsClient,
|
||
streamsQuotaAvailable: make(chan struct{}, 1),
|
||
czData: new(channelzData),
|
||
- onGoAway: onGoAway,
|
||
- onClose: onClose,
|
||
keepaliveEnabled: keepaliveEnabled,
|
||
bufferPool: newBufferPool(),
|
||
+ onClose: onClose,
|
||
}
|
||
// Add peer information to the http2client context.
|
||
t.ctx = peer.NewContext(t.ctx, t.getPeer())
|
||
@@ -363,21 +383,32 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||
t.kpDormancyCond = sync.NewCond(&t.mu)
|
||
go t.keepalive()
|
||
}
|
||
- // Start the reader goroutine for incoming message. Each transport has
|
||
- // a dedicated goroutine which reads HTTP2 frame from network. Then it
|
||
- // dispatches the frame to the corresponding stream entity.
|
||
- go t.reader()
|
||
+
|
||
+ // Start the reader goroutine for incoming messages. Each transport has a
|
||
+ // dedicated goroutine which reads HTTP2 frames from the network. Then it
|
||
+ // dispatches the frame to the corresponding stream entity. When the
|
||
+ // server preface is received, readerErrCh is closed. If an error occurs
|
||
+ // first, an error is pushed to the channel. This must be checked before
|
||
+ // returning from this function.
|
||
+ readerErrCh := make(chan error, 1)
|
||
+ go t.reader(readerErrCh)
|
||
+ defer func() {
|
||
+ if err == nil {
|
||
+ err = <-readerErrCh
|
||
+ }
|
||
+ if err != nil {
|
||
+ t.Close(err)
|
||
+ }
|
||
+ }()
|
||
|
||
// Send connection preface to server.
|
||
n, err := t.conn.Write(clientPreface)
|
||
if err != nil {
|
||
err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
|
||
- t.Close(err)
|
||
return nil, err
|
||
}
|
||
if n != len(clientPreface) {
|
||
err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
|
||
- t.Close(err)
|
||
return nil, err
|
||
}
|
||
var ss []http2.Setting
|
||
@@ -397,14 +428,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||
err = t.framer.fr.WriteSettings(ss...)
|
||
if err != nil {
|
||
err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
|
||
- t.Close(err)
|
||
return nil, err
|
||
}
|
||
// Adjust the connection flow control window if needed.
|
||
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
|
||
if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
|
||
err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
|
||
- t.Close(err)
|
||
return nil, err
|
||
}
|
||
}
|
||
@@ -417,10 +446,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||
go func() {
|
||
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
|
||
err := t.loopy.run()
|
||
- if err != nil {
|
||
- if logger.V(logLevel) {
|
||
- logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||
- }
|
||
+ if logger.V(logLevel) {
|
||
+ logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err)
|
||
}
|
||
// Do not close the transport. Let reader goroutine handle it since
|
||
// there might be data in the buffers.
|
||
@@ -507,9 +534,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
|
||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
|
||
}
|
||
|
||
+ registeredCompressors := t.registeredCompressors
|
||
if callHdr.SendCompress != "" {
|
||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
|
||
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
|
||
+ // Include the outgoing compressor name when compressor is not registered
|
||
+ // via encoding.RegisterCompressor. This is possible when client uses
|
||
+ // WithCompressor dial option.
|
||
+ if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) {
|
||
+ if registeredCompressors != "" {
|
||
+ registeredCompressors += ","
|
||
+ }
|
||
+ registeredCompressors += callHdr.SendCompress
|
||
+ }
|
||
+ }
|
||
+
|
||
+ if registeredCompressors != "" {
|
||
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors})
|
||
}
|
||
if dl, ok := ctx.Deadline(); ok {
|
||
// Send out timeout regardless its value. The server can detect timeout context by itself.
|
||
@@ -589,7 +629,11 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s
|
||
for _, c := range t.perRPCCreds {
|
||
data, err := c.GetRequestMetadata(ctx, audience)
|
||
if err != nil {
|
||
- if _, ok := status.FromError(err); ok {
|
||
+ if st, ok := status.FromError(err); ok {
|
||
+ // Restrict the code to the list allowed by gRFC A54.
|
||
+ if istatus.IsRestrictedControlPlaneCode(st) {
|
||
+ err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
|
||
+ }
|
||
return nil, err
|
||
}
|
||
|
||
@@ -618,7 +662,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
|
||
}
|
||
data, err := callCreds.GetRequestMetadata(ctx, audience)
|
||
if err != nil {
|
||
- return nil, status.Errorf(codes.Internal, "transport: %v", err)
|
||
+ if st, ok := status.FromError(err); ok {
|
||
+ // Restrict the code to the list allowed by gRFC A54.
|
||
+ if istatus.IsRestrictedControlPlaneCode(st) {
|
||
+ err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
|
||
+ }
|
||
+ return nil, err
|
||
+ }
|
||
+ return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err)
|
||
}
|
||
callAuthData = make(map[string]string, len(data))
|
||
for k, v := range data {
|
||
@@ -634,13 +685,13 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
|
||
// NewStream errors result in transparent retry, as they mean nothing went onto
|
||
// the wire. However, there are two notable exceptions:
|
||
//
|
||
-// 1. If the stream headers violate the max header list size allowed by the
|
||
-// server. It's possible this could succeed on another transport, even if
|
||
-// it's unlikely, but do not transparently retry.
|
||
-// 2. If the credentials errored when requesting their headers. In this case,
|
||
-// it's possible a retry can fix the problem, but indefinitely transparently
|
||
-// retrying is not appropriate as it is likely the credentials, if they can
|
||
-// eventually succeed, would need I/O to do so.
|
||
+// 1. If the stream headers violate the max header list size allowed by the
|
||
+// server. It's possible this could succeed on another transport, even if
|
||
+// it's unlikely, but do not transparently retry.
|
||
+// 2. If the credentials errored when requesting their headers. In this case,
|
||
+// it's possible a retry can fix the problem, but indefinitely transparently
|
||
+// retrying is not appropriate as it is likely the credentials, if they can
|
||
+// eventually succeed, would need I/O to do so.
|
||
type NewStreamError struct {
|
||
Err error
|
||
|
||
@@ -655,6 +706,18 @@ func (e NewStreamError) Error() string {
|
||
// streams. All non-nil errors returned will be *NewStreamError.
|
||
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
|
||
ctx = peer.NewContext(ctx, t.getPeer())
|
||
+
|
||
+ // ServerName field of the resolver returned address takes precedence over
|
||
+ // Host field of CallHdr to determine the :authority header. This is because,
|
||
+ // the ServerName field takes precedence for server authentication during
|
||
+ // TLS handshake, and the :authority header should match the value used
|
||
+ // for server authentication.
|
||
+ if t.address.ServerName != "" {
|
||
+ newCallHdr := *callHdr
|
||
+ newCallHdr.Host = t.address.ServerName
|
||
+ callHdr = &newCallHdr
|
||
+ }
|
||
+
|
||
headerFields, err := t.createHeaderFields(ctx, callHdr)
|
||
if err != nil {
|
||
return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
|
||
@@ -679,15 +742,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||
endStream: false,
|
||
initStream: func(id uint32) error {
|
||
t.mu.Lock()
|
||
- if state := t.state; state != reachable {
|
||
+ // TODO: handle transport closure in loopy instead and remove this
|
||
+ // initStream is never called when transport is draining.
|
||
+ if t.state == closing {
|
||
t.mu.Unlock()
|
||
- // Do a quick cleanup.
|
||
- err := error(errStreamDrain)
|
||
- if state == closing {
|
||
- err = ErrConnClosing
|
||
- }
|
||
- cleanup(err)
|
||
- return err
|
||
+ cleanup(ErrConnClosing)
|
||
+ return ErrConnClosing
|
||
}
|
||
if channelz.IsOn() {
|
||
atomic.AddInt64(&t.czData.streamsStarted, 1)
|
||
@@ -705,6 +765,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||
}
|
||
firstTry := true
|
||
var ch chan struct{}
|
||
+ transportDrainRequired := false
|
||
checkForStreamQuota := func(it interface{}) bool {
|
||
if t.streamQuota <= 0 { // Can go negative if server decreases it.
|
||
if firstTry {
|
||
@@ -720,6 +781,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||
h := it.(*headerFrame)
|
||
h.streamID = t.nextID
|
||
t.nextID += 2
|
||
+
|
||
+ // Drain client transport if nextID > MaxStreamID which signals gRPC that
|
||
+ // the connection is closed and a new one must be created for subsequent RPCs.
|
||
+ transportDrainRequired = t.nextID > MaxStreamID
|
||
+
|
||
s.id = h.streamID
|
||
s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
|
||
t.mu.Lock()
|
||
@@ -799,6 +865,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
|
||
sh.HandleRPC(s.ctx, outHeader)
|
||
}
|
||
}
|
||
+ if transportDrainRequired {
|
||
+ if logger.V(logLevel) {
|
||
+ logger.Infof("transport: t.nextID > MaxStreamID. Draining")
|
||
+ }
|
||
+ t.GracefulClose()
|
||
+ }
|
||
return s, nil
|
||
}
|
||
|
||
@@ -880,20 +952,21 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
|
||
// Close kicks off the shutdown process of the transport. This should be called
|
||
// only once on a transport. Once it is called, the transport should not be
|
||
// accessed any more.
|
||
-//
|
||
-// This method blocks until the addrConn that initiated this transport is
|
||
-// re-connected. This happens because t.onClose() begins reconnect logic at the
|
||
-// addrConn level and blocks until the addrConn is successfully connected.
|
||
func (t *http2Client) Close(err error) {
|
||
t.mu.Lock()
|
||
- // Make sure we only Close once.
|
||
+ // Make sure we only close once.
|
||
if t.state == closing {
|
||
t.mu.Unlock()
|
||
return
|
||
}
|
||
- // Call t.onClose before setting the state to closing to prevent the client
|
||
- // from attempting to create new streams ASAP.
|
||
- t.onClose()
|
||
+ if logger.V(logLevel) {
|
||
+ logger.Infof("transport: closing: %v", err)
|
||
+ }
|
||
+ // Call t.onClose ASAP to prevent the client from attempting to create new
|
||
+ // streams.
|
||
+ if t.state != draining {
|
||
+ t.onClose(GoAwayInvalid)
|
||
+ }
|
||
t.state = closing
|
||
streams := t.activeStreams
|
||
t.activeStreams = nil
|
||
@@ -943,11 +1016,15 @@ func (t *http2Client) GracefulClose() {
|
||
t.mu.Unlock()
|
||
return
|
||
}
|
||
+ if logger.V(logLevel) {
|
||
+ logger.Infof("transport: GracefulClose called")
|
||
+ }
|
||
+ t.onClose(GoAwayInvalid)
|
||
t.state = draining
|
||
active := len(t.activeStreams)
|
||
t.mu.Unlock()
|
||
if active == 0 {
|
||
- t.Close(ErrConnClosing)
|
||
+ t.Close(connectionErrorf(true, nil, "no active streams left to process while draining"))
|
||
return
|
||
}
|
||
t.controlBuf.put(&incomingGoAway{})
|
||
@@ -1105,7 +1182,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
|
||
statusCode, ok := http2ErrConvTab[f.ErrCode]
|
||
if !ok {
|
||
if logger.V(logLevel) {
|
||
- logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
|
||
+ logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode)
|
||
}
|
||
statusCode = codes.Unknown
|
||
}
|
||
@@ -1223,8 +1300,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
||
// Notify the clientconn about the GOAWAY before we set the state to
|
||
// draining, to allow the client to stop attempting to create streams
|
||
// before disallowing new streams on this connection.
|
||
- t.onGoAway(t.goAwayReason)
|
||
- t.state = draining
|
||
+ if t.state != draining {
|
||
+ t.onClose(t.goAwayReason)
|
||
+ t.state = draining
|
||
+ }
|
||
}
|
||
// All streams with IDs greater than the GoAwayId
|
||
// and smaller than the previous GoAway ID should be killed.
|
||
@@ -1482,33 +1561,35 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
|
||
}
|
||
|
||
-// reader runs as a separate goroutine in charge of reading data from network
|
||
-// connection.
|
||
-//
|
||
-// TODO(zhaoq): currently one reader per transport. Investigate whether this is
|
||
-// optimal.
|
||
-// TODO(zhaoq): Check the validity of the incoming frame sequence.
|
||
-func (t *http2Client) reader() {
|
||
- defer close(t.readerDone)
|
||
- // Check the validity of server preface.
|
||
+// readServerPreface reads and handles the initial settings frame from the
|
||
+// server.
|
||
+func (t *http2Client) readServerPreface() error {
|
||
frame, err := t.framer.fr.ReadFrame()
|
||
if err != nil {
|
||
- err = connectionErrorf(true, err, "error reading server preface: %v", err)
|
||
- t.Close(err) // this kicks off resetTransport, so must be last before return
|
||
- return
|
||
- }
|
||
- t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
|
||
- if t.keepaliveEnabled {
|
||
- atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||
+ return connectionErrorf(true, err, "error reading server preface: %v", err)
|
||
}
|
||
sf, ok := frame.(*http2.SettingsFrame)
|
||
if !ok {
|
||
- // this kicks off resetTransport, so must be last before return
|
||
- t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame))
|
||
- return
|
||
+ return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)
|
||
}
|
||
- t.onPrefaceReceipt()
|
||
t.handleSettings(sf, true)
|
||
+ return nil
|
||
+}
|
||
+
|
||
+// reader verifies the server preface and reads all subsequent data from
|
||
+// network connection. If the server preface is not read successfully, an
|
||
+// error is pushed to errCh; otherwise errCh is closed with no error.
|
||
+func (t *http2Client) reader(errCh chan<- error) {
|
||
+ defer close(t.readerDone)
|
||
+
|
||
+ if err := t.readServerPreface(); err != nil {
|
||
+ errCh <- err
|
||
+ return
|
||
+ }
|
||
+ close(errCh)
|
||
+ if t.keepaliveEnabled {
|
||
+ atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||
+ }
|
||
|
||
// loop to keep reading incoming messages on this transport.
|
||
for {
|
||
@@ -1711,3 +1792,9 @@ func (t *http2Client) getOutFlowWindow() int64 {
|
||
return -2
|
||
}
|
||
}
|
||
+
|
||
+func (t *http2Client) stateForTesting() transportState {
|
||
+ t.mu.Lock()
|
||
+ defer t.mu.Unlock()
|
||
+ return t.state
|
||
+}
|
||
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
|
||
index 3dd15647bc84..bc3da706726d 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
|
||
@@ -21,6 +21,7 @@ package transport
|
||
import (
|
||
"bytes"
|
||
"context"
|
||
+ "errors"
|
||
"fmt"
|
||
"io"
|
||
"math"
|
||
@@ -41,6 +42,7 @@ import (
|
||
"google.golang.org/grpc/credentials"
|
||
"google.golang.org/grpc/internal/channelz"
|
||
"google.golang.org/grpc/internal/grpcrand"
|
||
+ "google.golang.org/grpc/internal/grpcsync"
|
||
"google.golang.org/grpc/keepalive"
|
||
"google.golang.org/grpc/metadata"
|
||
"google.golang.org/grpc/peer"
|
||
@@ -101,13 +103,13 @@ type http2Server struct {
|
||
|
||
mu sync.Mutex // guard the following
|
||
|
||
- // drainChan is initialized when Drain() is called the first time.
|
||
- // After which the server writes out the first GoAway(with ID 2^31-1) frame.
|
||
- // Then an independent goroutine will be launched to later send the second GoAway.
|
||
- // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
|
||
- // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is
|
||
- // already underway.
|
||
- drainChan chan struct{}
|
||
+ // drainEvent is initialized when Drain() is called the first time. After
|
||
+ // which the server writes out the first GoAway(with ID 2^31-1) frame. Then
|
||
+ // an independent goroutine will be launched to later send the second
|
||
+ // GoAway. During this time we don't want to write another first GoAway(with
|
||
+ // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is
|
||
+ // already initialized since draining is already underway.
|
||
+ drainEvent *grpcsync.Event
|
||
state transportState
|
||
activeStreams map[uint32]*Stream
|
||
// idle is the time instant when the connection went idle.
|
||
@@ -293,7 +295,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||
|
||
defer func() {
|
||
if err != nil {
|
||
- t.Close()
|
||
+ t.Close(err)
|
||
}
|
||
}()
|
||
|
||
@@ -331,10 +333,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||
go func() {
|
||
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
|
||
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
|
||
- if err := t.loopy.run(); err != nil {
|
||
- if logger.V(logLevel) {
|
||
- logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||
- }
|
||
+ err := t.loopy.run()
|
||
+ if logger.V(logLevel) {
|
||
+ logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err)
|
||
}
|
||
t.conn.Close()
|
||
t.controlBuf.finish()
|
||
@@ -344,8 +345,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||
return t, nil
|
||
}
|
||
|
||
-// operateHeader takes action on the decoded headers.
|
||
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
|
||
+// operateHeaders takes action on the decoded headers. Returns an error if fatal
|
||
+// error encountered and transport needs to close, otherwise returns nil.
|
||
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error {
|
||
// Acquire max stream ID lock for entire duration
|
||
t.maxStreamMu.Lock()
|
||
defer t.maxStreamMu.Unlock()
|
||
@@ -361,15 +363,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||
rstCode: http2.ErrCodeFrameSize,
|
||
onWrite: func() {},
|
||
})
|
||
- return false
|
||
+ return nil
|
||
}
|
||
|
||
if streamID%2 != 1 || streamID <= t.maxStreamID {
|
||
// illegal gRPC stream id.
|
||
- if logger.V(logLevel) {
|
||
- logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
|
||
- }
|
||
- return true
|
||
+ return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame)
|
||
}
|
||
t.maxStreamID = streamID
|
||
|
||
@@ -381,13 +380,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||
fc: &inFlow{limit: uint32(t.initialWindowSize)},
|
||
}
|
||
var (
|
||
- // If a gRPC Response-Headers has already been received, then it means
|
||
- // that the peer is speaking gRPC and we are in gRPC mode.
|
||
- isGRPC = false
|
||
- mdata = make(map[string][]string)
|
||
- httpMethod string
|
||
- // headerError is set if an error is encountered while parsing the headers
|
||
- headerError bool
|
||
+ // if false, content-type was missing or invalid
|
||
+ isGRPC = false
|
||
+ contentType = ""
|
||
+ mdata = make(map[string][]string)
|
||
+ httpMethod string
|
||
+ // these are set if an error is encountered while parsing the headers
|
||
+ protocolError bool
|
||
+ headerError *status.Status
|
||
|
||
timeoutSet bool
|
||
timeout time.Duration
|
||
@@ -398,6 +398,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||
case "content-type":
|
||
contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
|
||
if !validContentType {
|
||
+ contentType = hf.Value
|
||
break
|
||
}
|
||
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
|
||
@@ -413,7 +414,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||
timeoutSet = true
|
||
var err error
|
||
if timeout, err = decodeTimeout(hf.Value); err != nil {
|
||
- headerError = true
|
||
+ headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err)
|
||
}
|
||
// "Transports must consider requests containing the Connection header
|
||
// as malformed." - A41
|
||
@@ -421,14 +422,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||
if logger.V(logLevel) {
|
||
logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec")
|
||
}
|
||
- headerError = true
|
||
+ protocolError = true
|
||
default:
|
||
if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
|
||
break
|
||
}
|
||
v, err := decodeMetadataHeader(hf.Name, hf.Value)
|
||
if err != nil {
|
||
- headerError = true
|
||
+ headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
|
||
logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
|
||
break
|
||
}
|
||
@@ -447,23 +448,43 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||
logger.Errorf("transport: %v", errMsg)
|
||
}
|
||
t.controlBuf.put(&earlyAbortStream{
|
||
- httpStatus: 400,
|
||
+ httpStatus: http.StatusBadRequest,
|
||
streamID: streamID,
|
||
contentSubtype: s.contentSubtype,
|
||
status: status.New(codes.Internal, errMsg),
|
||
rst: !frame.StreamEnded(),
|
||
})
|
||
- return false
|
||
+ return nil
|
||
}
|
||
|
||
- if !isGRPC || headerError {
|
||
+ if protocolError {
|
||
t.controlBuf.put(&cleanupStream{
|
||
streamID: streamID,
|
||
rst: true,
|
||
rstCode: http2.ErrCodeProtocol,
|
||
onWrite: func() {},
|
||
})
|
||
- return false
|
||
+ return nil
|
||
+ }
|
||
+ if !isGRPC {
|
||
+ t.controlBuf.put(&earlyAbortStream{
|
||
+ httpStatus: http.StatusUnsupportedMediaType,
|
||
+ streamID: streamID,
|
||
+ contentSubtype: s.contentSubtype,
|
||
+ status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType),
|
||
+ rst: !frame.StreamEnded(),
|
||
+ })
|
||
+ return nil
|
||
+ }
|
||
+ if headerError != nil {
|
||
+ t.controlBuf.put(&earlyAbortStream{
|
||
+ httpStatus: http.StatusBadRequest,
|
||
+ streamID: streamID,
|
||
+ contentSubtype: s.contentSubtype,
|
||
+ status: headerError,
|
||
+ rst: !frame.StreamEnded(),
|
||
+ })
|
||
+ return nil
|
||
}
|
||
|
||
// "If :authority is missing, Host must be renamed to :authority." - A41
|
||
@@ -503,7 +524,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||
if t.state != reachable {
|
||
t.mu.Unlock()
|
||
s.cancel()
|
||
- return false
|
||
+ return nil
|
||
}
|
||
if uint32(len(t.activeStreams)) >= t.maxStreams {
|
||
t.mu.Unlock()
|
||
@@ -514,7 +535,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||
onWrite: func() {},
|
||
})
|
||
s.cancel()
|
||
- return false
|
||
+ return nil
|
||
}
|
||
if httpMethod != http.MethodPost {
|
||
t.mu.Unlock()
|
||
@@ -530,7 +551,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||
rst: !frame.StreamEnded(),
|
||
})
|
||
s.cancel()
|
||
- return false
|
||
+ return nil
|
||
}
|
||
if t.inTapHandle != nil {
|
||
var err error
|
||
@@ -550,7 +571,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||
status: stat,
|
||
rst: !frame.StreamEnded(),
|
||
})
|
||
- return false
|
||
+ return nil
|
||
}
|
||
}
|
||
t.activeStreams[streamID] = s
|
||
@@ -597,7 +618,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||
wq: s.wq,
|
||
})
|
||
handle(s)
|
||
- return false
|
||
+ return nil
|
||
}
|
||
|
||
// HandleStreams receives incoming streams using the given handler. This is
|
||
@@ -630,19 +651,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
||
continue
|
||
}
|
||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||
- t.Close()
|
||
+ t.Close(err)
|
||
return
|
||
}
|
||
- if logger.V(logLevel) {
|
||
- logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
|
||
- }
|
||
- t.Close()
|
||
+ t.Close(err)
|
||
return
|
||
}
|
||
switch frame := frame.(type) {
|
||
case *http2.MetaHeadersFrame:
|
||
- if t.operateHeaders(frame, handle, traceCtx) {
|
||
- t.Close()
|
||
+ if err := t.operateHeaders(frame, handle, traceCtx); err != nil {
|
||
+ t.Close(err)
|
||
break
|
||
}
|
||
case *http2.DataFrame:
|
||
@@ -843,8 +861,8 @@ const (
|
||
|
||
func (t *http2Server) handlePing(f *http2.PingFrame) {
|
||
if f.IsAck() {
|
||
- if f.Data == goAwayPing.data && t.drainChan != nil {
|
||
- close(t.drainChan)
|
||
+ if f.Data == goAwayPing.data && t.drainEvent != nil {
|
||
+ t.drainEvent.Fire()
|
||
return
|
||
}
|
||
// Maybe it's a BDP ping.
|
||
@@ -886,10 +904,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
|
||
|
||
if t.pingStrikes > maxPingStrikes {
|
||
// Send goaway and close the connection.
|
||
- if logger.V(logLevel) {
|
||
- logger.Errorf("transport: Got too many pings from the client, closing the connection.")
|
||
- }
|
||
- t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
|
||
+ t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
|
||
}
|
||
}
|
||
|
||
@@ -1153,7 +1168,7 @@ func (t *http2Server) keepalive() {
|
||
if logger.V(logLevel) {
|
||
logger.Infof("transport: closing server transport due to maximum connection age.")
|
||
}
|
||
- t.Close()
|
||
+ t.controlBuf.put(closeConnection{})
|
||
case <-t.done:
|
||
}
|
||
return
|
||
@@ -1169,10 +1184,7 @@ func (t *http2Server) keepalive() {
|
||
continue
|
||
}
|
||
if outstandingPing && kpTimeoutLeft <= 0 {
|
||
- if logger.V(logLevel) {
|
||
- logger.Infof("transport: closing server transport due to idleness.")
|
||
- }
|
||
- t.Close()
|
||
+ t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time))
|
||
return
|
||
}
|
||
if !outstandingPing {
|
||
@@ -1199,12 +1211,15 @@ func (t *http2Server) keepalive() {
|
||
// Close starts shutting down the http2Server transport.
|
||
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
|
||
// could cause some resource issue. Revisit this later.
|
||
-func (t *http2Server) Close() {
|
||
+func (t *http2Server) Close(err error) {
|
||
t.mu.Lock()
|
||
if t.state == closing {
|
||
t.mu.Unlock()
|
||
return
|
||
}
|
||
+ if logger.V(logLevel) {
|
||
+ logger.Infof("transport: closing: %v", err)
|
||
+ }
|
||
t.state = closing
|
||
streams := t.activeStreams
|
||
t.activeStreams = nil
|
||
@@ -1295,10 +1310,10 @@ func (t *http2Server) RemoteAddr() net.Addr {
|
||
func (t *http2Server) Drain() {
|
||
t.mu.Lock()
|
||
defer t.mu.Unlock()
|
||
- if t.drainChan != nil {
|
||
+ if t.drainEvent != nil {
|
||
return
|
||
}
|
||
- t.drainChan = make(chan struct{})
|
||
+ t.drainEvent = grpcsync.NewEvent()
|
||
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
|
||
}
|
||
|
||
@@ -1319,19 +1334,20 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
||
// Stop accepting more streams now.
|
||
t.state = draining
|
||
sid := t.maxStreamID
|
||
+ retErr := g.closeConn
|
||
if len(t.activeStreams) == 0 {
|
||
- g.closeConn = true
|
||
+ retErr = errors.New("second GOAWAY written and no active streams left to process")
|
||
}
|
||
t.mu.Unlock()
|
||
t.maxStreamMu.Unlock()
|
||
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
|
||
return false, err
|
||
}
|
||
- if g.closeConn {
|
||
+ if retErr != nil {
|
||
// Abruptly close the connection following the GoAway (via
|
||
// loopywriter). But flush out what's inside the buffer first.
|
||
t.framer.writer.Flush()
|
||
- return false, fmt.Errorf("transport: Connection closing")
|
||
+ return false, retErr
|
||
}
|
||
return true, nil
|
||
}
|
||
@@ -1353,7 +1369,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
||
timer := time.NewTimer(time.Minute)
|
||
defer timer.Stop()
|
||
select {
|
||
- case <-t.drainChan:
|
||
+ case <-t.drainEvent.Done():
|
||
case <-timer.C:
|
||
case <-t.done:
|
||
return
|
||
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
|
||
index 6c3ba8515940..0ac77ea4f8c7 100644
|
||
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
|
||
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
|
||
@@ -43,6 +43,10 @@ import (
|
||
"google.golang.org/grpc/tap"
|
||
)
|
||
|
||
+// ErrNoHeaders is used as a signal that a trailers only response was received,
|
||
+// and is not a real error.
|
||
+var ErrNoHeaders = errors.New("stream has no headers")
|
||
+
|
||
const logLevel = 2
|
||
|
||
type bufferPool struct {
|
||
@@ -366,9 +370,15 @@ func (s *Stream) Header() (metadata.MD, error) {
|
||
return s.header.Copy(), nil
|
||
}
|
||
s.waitOnHeader()
|
||
+
|
||
if !s.headerValid {
|
||
return nil, s.status.Err()
|
||
}
|
||
+
|
||
+ if s.noHeaders {
|
||
+ return nil, ErrNoHeaders
|
||
+ }
|
||
+
|
||
return s.header.Copy(), nil
|
||
}
|
||
|
||
@@ -573,8 +583,8 @@ type ConnectOptions struct {
|
||
|
||
// NewClientTransport establishes the transport with the required ConnectOptions
|
||
// and returns it to the caller.
|
||
-func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
|
||
- return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose)
|
||
+func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
|
||
+ return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
|
||
}
|
||
|
||
// Options provides additional hints and information for message
|
||
@@ -691,7 +701,7 @@ type ServerTransport interface {
|
||
// Close tears down the transport. Once it is called, the transport
|
||
// should not be accessed any more. All the pending streams and their
|
||
// handlers will be terminated asynchronously.
|
||
- Close()
|
||
+ Close(err error)
|
||
|
||
// RemoteAddr returns the remote network address.
|
||
RemoteAddr() net.Addr
|
||
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
|
||
index 98d62e0675f6..fb4a88f59bd3 100644
|
||
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
|
||
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
|
||
@@ -41,10 +41,11 @@ type MD map[string][]string
|
||
// New creates an MD from a given key-value map.
|
||
//
|
||
// Only the following ASCII characters are allowed in keys:
|
||
-// - digits: 0-9
|
||
-// - uppercase letters: A-Z (normalized to lower)
|
||
-// - lowercase letters: a-z
|
||
-// - special characters: -_.
|
||
+// - digits: 0-9
|
||
+// - uppercase letters: A-Z (normalized to lower)
|
||
+// - lowercase letters: a-z
|
||
+// - special characters: -_.
|
||
+//
|
||
// Uppercase letters are automatically converted to lowercase.
|
||
//
|
||
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
|
||
@@ -62,10 +63,11 @@ func New(m map[string]string) MD {
|
||
// Pairs panics if len(kv) is odd.
|
||
//
|
||
// Only the following ASCII characters are allowed in keys:
|
||
-// - digits: 0-9
|
||
-// - uppercase letters: A-Z (normalized to lower)
|
||
-// - lowercase letters: a-z
|
||
-// - special characters: -_.
|
||
+// - digits: 0-9
|
||
+// - uppercase letters: A-Z (normalized to lower)
|
||
+// - lowercase letters: a-z
|
||
+// - special characters: -_.
|
||
+//
|
||
// Uppercase letters are automatically converted to lowercase.
|
||
//
|
||
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
|
||
@@ -196,7 +198,7 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
|
||
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
|
||
// key from the incoming metadata if it exists. Key must be lower-case.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
|
||
index 843633c910a1..c525dc070fc6 100644
|
||
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
|
||
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
|
||
@@ -26,6 +26,7 @@ import (
|
||
"google.golang.org/grpc/balancer"
|
||
"google.golang.org/grpc/codes"
|
||
"google.golang.org/grpc/internal/channelz"
|
||
+ istatus "google.golang.org/grpc/internal/status"
|
||
"google.golang.org/grpc/internal/transport"
|
||
"google.golang.org/grpc/status"
|
||
)
|
||
@@ -57,12 +58,18 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
|
||
pw.mu.Unlock()
|
||
}
|
||
|
||
-func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
|
||
+// doneChannelzWrapper performs the following:
|
||
+// - increments the calls started channelz counter
|
||
+// - wraps the done function in the passed in result to increment the calls
|
||
+// failed or calls succeeded channelz counter before invoking the actual
|
||
+// done function.
|
||
+func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) {
|
||
acw.mu.Lock()
|
||
ac := acw.ac
|
||
acw.mu.Unlock()
|
||
ac.incrCallsStarted()
|
||
- return func(b balancer.DoneInfo) {
|
||
+ done := result.Done
|
||
+ result.Done = func(b balancer.DoneInfo) {
|
||
if b.Err != nil && b.Err != io.EOF {
|
||
ac.incrCallsFailed()
|
||
} else {
|
||
@@ -81,7 +88,7 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f
|
||
// - the current picker returns other errors and failfast is false.
|
||
// - the subConn returned by the current picker is not READY
|
||
// When one of these situations happens, pick blocks until the picker gets updated.
|
||
-func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
|
||
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) {
|
||
var ch chan struct{}
|
||
|
||
var lastPickErr error
|
||
@@ -89,7 +96,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||
pw.mu.Lock()
|
||
if pw.done {
|
||
pw.mu.Unlock()
|
||
- return nil, nil, ErrClientConnClosing
|
||
+ return nil, balancer.PickResult{}, ErrClientConnClosing
|
||
}
|
||
|
||
if pw.picker == nil {
|
||
@@ -110,9 +117,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||
}
|
||
switch ctx.Err() {
|
||
case context.DeadlineExceeded:
|
||
- return nil, nil, status.Error(codes.DeadlineExceeded, errStr)
|
||
+ return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr)
|
||
case context.Canceled:
|
||
- return nil, nil, status.Error(codes.Canceled, errStr)
|
||
+ return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr)
|
||
}
|
||
case <-ch:
|
||
}
|
||
@@ -124,14 +131,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||
pw.mu.Unlock()
|
||
|
||
pickResult, err := p.Pick(info)
|
||
-
|
||
if err != nil {
|
||
if err == balancer.ErrNoSubConnAvailable {
|
||
continue
|
||
}
|
||
- if _, ok := status.FromError(err); ok {
|
||
+ if st, ok := status.FromError(err); ok {
|
||
// Status error: end the RPC unconditionally with this status.
|
||
- return nil, nil, dropError{error: err}
|
||
+ // First restrict the code to the list allowed by gRFC A54.
|
||
+ if istatus.IsRestrictedControlPlaneCode(st) {
|
||
+ err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
|
||
+ }
|
||
+ return nil, balancer.PickResult{}, dropError{error: err}
|
||
}
|
||
// For all other errors, wait for ready RPCs should block and other
|
||
// RPCs should fail with unavailable.
|
||
@@ -139,7 +149,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||
lastPickErr = err
|
||
continue
|
||
}
|
||
- return nil, nil, status.Error(codes.Unavailable, err.Error())
|
||
+ return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
|
||
}
|
||
|
||
acw, ok := pickResult.SubConn.(*acBalancerWrapper)
|
||
@@ -149,9 +159,10 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
|
||
}
|
||
if t := acw.getAddrConn().getReadyTransport(); t != nil {
|
||
if channelz.IsOn() {
|
||
- return t, doneChannelzWrapper(acw, pickResult.Done), nil
|
||
+ doneChannelzWrapper(acw, &pickResult)
|
||
+ return t, pickResult, nil
|
||
}
|
||
- return t, pickResult.Done, nil
|
||
+ return t, pickResult, nil
|
||
}
|
||
if pickResult.Done != nil {
|
||
// Calling done with nil error, no bytes sent and no bytes received.
|
||
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
|
||
index fb7a99e0a273..fc91b4d266de 100644
|
||
--- a/vendor/google.golang.org/grpc/pickfirst.go
|
||
+++ b/vendor/google.golang.org/grpc/pickfirst.go
|
||
@@ -51,7 +51,7 @@ type pickfirstBalancer struct {
|
||
|
||
func (b *pickfirstBalancer) ResolverError(err error) {
|
||
if logger.V(2) {
|
||
- logger.Infof("pickfirstBalancer: ResolverError called with error %v", err)
|
||
+ logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err)
|
||
}
|
||
if b.subConn == nil {
|
||
b.state = connectivity.TransientFailure
|
||
@@ -102,8 +102,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
|
||
b.subConn = subConn
|
||
b.state = connectivity.Idle
|
||
b.cc.UpdateState(balancer.State{
|
||
- ConnectivityState: connectivity.Idle,
|
||
- Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}},
|
||
+ ConnectivityState: connectivity.Connecting,
|
||
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
|
||
})
|
||
b.subConn.Connect()
|
||
return nil
|
||
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
|
||
index 0a1e975ad916..cd45547854f0 100644
|
||
--- a/vendor/google.golang.org/grpc/preloader.go
|
||
+++ b/vendor/google.golang.org/grpc/preloader.go
|
||
@@ -25,7 +25,7 @@ import (
|
||
|
||
// PreparedMsg is responsible for creating a Marshalled and Compressed object.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
|
||
index 99db79fafcfb..a6f26c8ab0f0 100644
|
||
--- a/vendor/google.golang.org/grpc/regenerate.sh
|
||
+++ b/vendor/google.golang.org/grpc/regenerate.sh
|
||
@@ -57,7 +57,8 @@ LEGACY_SOURCES=(
|
||
${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
|
||
${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
|
||
profiling/proto/service.proto
|
||
- reflection/grpc_reflection_v1alpha/reflection.proto
|
||
+ ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto
|
||
+ ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto
|
||
)
|
||
|
||
# Generates only the new gRPC Service symbols
|
||
@@ -119,8 +120,4 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/
|
||
# see grpc_testing_not_regenerate/README.md for details.
|
||
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go
|
||
|
||
-# grpc/testing does not have a go_package option.
|
||
-mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/
|
||
-mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/
|
||
-
|
||
cp -R ${WORKDIR}/out/google.golang.org/grpc/* .
|
||
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
|
||
index ca2e35a3596f..654e9ce69f4a 100644
|
||
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
|
||
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
|
||
@@ -24,6 +24,7 @@ import (
|
||
"context"
|
||
"net"
|
||
"net/url"
|
||
+ "strings"
|
||
|
||
"google.golang.org/grpc/attributes"
|
||
"google.golang.org/grpc/credentials"
|
||
@@ -96,7 +97,7 @@ const (
|
||
|
||
// Address represents a server the client connects to.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -236,20 +237,17 @@ type ClientConn interface {
|
||
//
|
||
// Examples:
|
||
//
|
||
-// - "dns://some_authority/foo.bar"
|
||
-// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
|
||
-// - "foo.bar"
|
||
-// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
|
||
-// - "unknown_scheme://authority/endpoint"
|
||
-// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
|
||
+// - "dns://some_authority/foo.bar"
|
||
+// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
|
||
+// - "foo.bar"
|
||
+// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
|
||
+// - "unknown_scheme://authority/endpoint"
|
||
+// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
|
||
type Target struct {
|
||
// Deprecated: use URL.Scheme instead.
|
||
Scheme string
|
||
// Deprecated: use URL.Host instead.
|
||
Authority string
|
||
- // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when
|
||
- // the former is empty.
|
||
- Endpoint string
|
||
// URL contains the parsed dial target with an optional default scheme added
|
||
// to it if the original dial target contained no scheme or contained an
|
||
// unregistered scheme. Any query params specified in the original dial
|
||
@@ -257,6 +255,24 @@ type Target struct {
|
||
URL url.URL
|
||
}
|
||
|
||
+// Endpoint retrieves endpoint without leading "/" from either `URL.Path`
|
||
+// or `URL.Opaque`. The latter is used when the former is empty.
|
||
+func (t Target) Endpoint() string {
|
||
+ endpoint := t.URL.Path
|
||
+ if endpoint == "" {
|
||
+ endpoint = t.URL.Opaque
|
||
+ }
|
||
+ // For targets of the form "[scheme]://[authority]/endpoint, the endpoint
|
||
+ // value returned from url.Parse() contains a leading "/". Although this is
|
||
+ // in accordance with RFC 3986, we do not want to break existing resolver
|
||
+ // implementations which expect the endpoint without the leading "/". So, we
|
||
+ // end up stripping the leading "/" here. But this will result in an
|
||
+ // incorrect parsing for something like "unix:///path/to/socket". Since we
|
||
+ // own the "unix" resolver, we can workaround in the unix resolver by using
|
||
+ // the `URL` field.
|
||
+ return strings.TrimPrefix(endpoint, "/")
|
||
+}
|
||
+
|
||
// Builder creates a resolver that will be used to watch name resolution updates.
|
||
type Builder interface {
|
||
// Build creates a new resolver for the given target.
|
||
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
|
||
index 5d407b004b0e..cb7020ebecd7 100644
|
||
--- a/vendor/google.golang.org/grpc/rpc_util.go
|
||
+++ b/vendor/google.golang.org/grpc/rpc_util.go
|
||
@@ -25,7 +25,6 @@ import (
|
||
"encoding/binary"
|
||
"fmt"
|
||
"io"
|
||
- "io/ioutil"
|
||
"math"
|
||
"strings"
|
||
"sync"
|
||
@@ -77,7 +76,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
|
||
return &gzipCompressor{
|
||
pool: sync.Pool{
|
||
New: func() interface{} {
|
||
- w, err := gzip.NewWriterLevel(ioutil.Discard, level)
|
||
+ w, err := gzip.NewWriterLevel(io.Discard, level)
|
||
if err != nil {
|
||
panic(err)
|
||
}
|
||
@@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
|
||
z.Close()
|
||
d.pool.Put(z)
|
||
}()
|
||
- return ioutil.ReadAll(z)
|
||
+ return io.ReadAll(z)
|
||
}
|
||
|
||
func (d *gzipDecompressor) Type() string {
|
||
@@ -198,7 +197,7 @@ func Header(md *metadata.MD) CallOption {
|
||
// HeaderCallOption is a CallOption for collecting response header metadata.
|
||
// The metadata field will be populated *after* the RPC completes.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -220,7 +219,7 @@ func Trailer(md *metadata.MD) CallOption {
|
||
// TrailerCallOption is a CallOption for collecting response trailer metadata.
|
||
// The metadata field will be populated *after* the RPC completes.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -242,7 +241,7 @@ func Peer(p *peer.Peer) CallOption {
|
||
// PeerCallOption is a CallOption for collecting the identity of the remote
|
||
// peer. The peer field will be populated *after* the RPC completes.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -282,7 +281,7 @@ func FailFast(failFast bool) CallOption {
|
||
// FailFastCallOption is a CallOption for indicating whether an RPC should fail
|
||
// fast or not.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -297,7 +296,8 @@ func (o FailFastCallOption) before(c *callInfo) error {
|
||
func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
|
||
|
||
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
|
||
-// in bytes the client can receive.
|
||
+// in bytes the client can receive. If this is not set, gRPC uses the default
|
||
+// 4MB.
|
||
func MaxCallRecvMsgSize(bytes int) CallOption {
|
||
return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes}
|
||
}
|
||
@@ -305,7 +305,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption {
|
||
// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
|
||
// size in bytes the client can receive.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -320,7 +320,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
|
||
func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
|
||
|
||
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
|
||
-// in bytes the client can send.
|
||
+// in bytes the client can send. If this is not set, gRPC uses the default
|
||
+// `math.MaxInt32`.
|
||
func MaxCallSendMsgSize(bytes int) CallOption {
|
||
return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes}
|
||
}
|
||
@@ -328,7 +329,7 @@ func MaxCallSendMsgSize(bytes int) CallOption {
|
||
// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
|
||
// size in bytes the client can send.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -351,7 +352,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
|
||
// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
|
||
// credentials to use for the call.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -369,7 +370,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
|
||
// sending the request. If WithCompressor is also set, UseCompressor has
|
||
// higher priority.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -379,7 +380,7 @@ func UseCompressor(name string) CallOption {
|
||
|
||
// CompressorCallOption is a CallOption that indicates the compressor to use.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -416,7 +417,7 @@ func CallContentSubtype(contentSubtype string) CallOption {
|
||
// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
|
||
// used for marshaling messages.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -444,7 +445,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
|
||
// This function is provided for advanced users; prefer to use only
|
||
// CallContentSubtype to select a registered codec instead.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -455,7 +456,7 @@ func ForceCodec(codec encoding.Codec) CallOption {
|
||
// ForceCodecCallOption is a CallOption that indicates the codec used for
|
||
// marshaling messages.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -480,7 +481,7 @@ func CallCustomCodec(codec Codec) CallOption {
|
||
// CustomCodecCallOption is a CallOption that indicates the codec used for
|
||
// marshaling messages.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -497,7 +498,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
|
||
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
|
||
// used for buffering this RPC's requests for retry purposes.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -508,7 +509,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption {
|
||
// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
|
||
// memory to be used for caching this RPC for retry purposes.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
@@ -548,10 +549,11 @@ type parser struct {
|
||
// format. The caller owns the returned msg memory.
|
||
//
|
||
// If there is an error, possible values are:
|
||
-// * io.EOF, when no messages remain
|
||
-// * io.ErrUnexpectedEOF
|
||
-// * of type transport.ConnectionError
|
||
-// * an error from the status package
|
||
+// - io.EOF, when no messages remain
|
||
+// - io.ErrUnexpectedEOF
|
||
+// - of type transport.ConnectionError
|
||
+// - an error from the status package
|
||
+//
|
||
// No other error values or types must be returned, which also means
|
||
// that the underlying io.Reader must not return an incompatible
|
||
// error.
|
||
@@ -710,7 +712,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
|
||
d, size, err = decompress(compressor, d, maxReceiveMessageSize)
|
||
}
|
||
if err != nil {
|
||
- return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||
+ return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
|
||
}
|
||
if size > maxReceiveMessageSize {
|
||
// TODO: Revisit the error code. Currently keep it consistent with java
|
||
@@ -745,7 +747,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize
|
||
}
|
||
// Read from LimitReader with limit max+1. So if the underlying
|
||
// reader is over limit, the result will be bigger than max.
|
||
- d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
|
||
+ d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
|
||
return d, len(d), err
|
||
}
|
||
|
||
@@ -758,7 +760,7 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf
|
||
return err
|
||
}
|
||
if err := c.Unmarshal(d, m); err != nil {
|
||
- return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
|
||
+ return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
|
||
}
|
||
if payInfo != nil {
|
||
payInfo.uncompressedBytes = d
|
||
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
|
||
index f4dde72b41f8..d5a6e78be44d 100644
|
||
--- a/vendor/google.golang.org/grpc/server.go
|
||
+++ b/vendor/google.golang.org/grpc/server.go
|
||
@@ -233,10 +233,11 @@ func newJoinServerOption(opts ...ServerOption) ServerOption {
|
||
return &joinServerOption{opts: opts}
|
||
}
|
||
|
||
-// WriteBufferSize determines how much data can be batched before doing a write on the wire.
|
||
-// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
|
||
-// The default value for this buffer is 32KB.
|
||
-// Zero will disable the write buffer such that each write will be on underlying connection.
|
||
+// WriteBufferSize determines how much data can be batched before doing a write
|
||
+// on the wire. The corresponding memory allocation for this buffer will be
|
||
+// twice the size to keep syscalls low. The default value for this buffer is
|
||
+// 32KB. Zero or negative values will disable the write buffer such that each
|
||
+// write will be on underlying connection.
|
||
// Note: A Send call may not directly translate to a write.
|
||
func WriteBufferSize(s int) ServerOption {
|
||
return newFuncServerOption(func(o *serverOptions) {
|
||
@@ -244,11 +245,10 @@ func WriteBufferSize(s int) ServerOption {
|
||
})
|
||
}
|
||
|
||
-// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
|
||
-// for one read syscall.
|
||
-// The default value for this buffer is 32KB.
|
||
-// Zero will disable read buffer for a connection so data framer can access the underlying
|
||
-// conn directly.
|
||
+// ReadBufferSize lets you set the size of read buffer, this determines how much
|
||
+// data can be read at most for one read syscall. The default value for this
|
||
+// buffer is 32KB. Zero or negative values will disable read buffer for a
|
||
+// connection so data framer can access the underlying conn directly.
|
||
func ReadBufferSize(s int) ServerOption {
|
||
return newFuncServerOption(func(o *serverOptions) {
|
||
o.readBufferSize = s
|
||
@@ -942,7 +942,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
|
||
}
|
||
|
||
func (s *Server) serveStreams(st transport.ServerTransport) {
|
||
- defer st.Close()
|
||
+ defer st.Close(errors.New("finished serving streams for the server transport"))
|
||
var wg sync.WaitGroup
|
||
|
||
var roundRobinCounter uint32
|
||
@@ -1008,7 +1008,8 @@ var _ http.Handler = (*Server)(nil)
|
||
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||
st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
|
||
if err != nil {
|
||
- http.Error(w, err.Error(), http.StatusInternalServerError)
|
||
+ // Errors returned from transport.NewServerHandlerTransport have
|
||
+ // already been written to w.
|
||
return
|
||
}
|
||
if !s.addConn(listenerAddressForServeHTTP, st) {
|
||
@@ -1046,7 +1047,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
|
||
s.mu.Lock()
|
||
defer s.mu.Unlock()
|
||
if s.conns == nil {
|
||
- st.Close()
|
||
+ st.Close(errors.New("Server.addConn called when server has already been stopped"))
|
||
return false
|
||
}
|
||
if s.drain {
|
||
@@ -1150,21 +1151,16 @@ func chainUnaryServerInterceptors(s *Server) {
|
||
|
||
func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
|
||
return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
|
||
- // the struct ensures the variables are allocated together, rather than separately, since we
|
||
- // know they should be garbage collected together. This saves 1 allocation and decreases
|
||
- // time/call by about 10% on the microbenchmark.
|
||
- var state struct {
|
||
- i int
|
||
- next UnaryHandler
|
||
- }
|
||
- state.next = func(ctx context.Context, req interface{}) (interface{}, error) {
|
||
- if state.i == len(interceptors)-1 {
|
||
- return interceptors[state.i](ctx, req, info, handler)
|
||
- }
|
||
- state.i++
|
||
- return interceptors[state.i-1](ctx, req, info, state.next)
|
||
- }
|
||
- return state.next(ctx, req)
|
||
+ return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
|
||
+ }
|
||
+}
|
||
+
|
||
+func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
|
||
+ if curr == len(interceptors)-1 {
|
||
+ return finalHandler
|
||
+ }
|
||
+ return func(ctx context.Context, req interface{}) (interface{}, error) {
|
||
+ return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
|
||
}
|
||
}
|
||
|
||
@@ -1303,7 +1299,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
||
d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
|
||
if err != nil {
|
||
if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
|
||
- channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
|
||
+ channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
|
||
}
|
||
return err
|
||
}
|
||
@@ -1470,21 +1466,16 @@ func chainStreamServerInterceptors(s *Server) {
|
||
|
||
func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
|
||
return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
|
||
- // the struct ensures the variables are allocated together, rather than separately, since we
|
||
- // know they should be garbage collected together. This saves 1 allocation and decreases
|
||
- // time/call by about 10% on the microbenchmark.
|
||
- var state struct {
|
||
- i int
|
||
- next StreamHandler
|
||
- }
|
||
- state.next = func(srv interface{}, ss ServerStream) error {
|
||
- if state.i == len(interceptors)-1 {
|
||
- return interceptors[state.i](srv, ss, info, handler)
|
||
- }
|
||
- state.i++
|
||
- return interceptors[state.i-1](srv, ss, info, state.next)
|
||
- }
|
||
- return state.next(srv, ss)
|
||
+ return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
|
||
+ }
|
||
+}
|
||
+
|
||
+func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
|
||
+ if curr == len(interceptors)-1 {
|
||
+ return finalHandler
|
||
+ }
|
||
+ return func(srv interface{}, stream ServerStream) error {
|
||
+ return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
|
||
}
|
||
}
|
||
|
||
@@ -1819,7 +1810,7 @@ func (s *Server) Stop() {
|
||
}
|
||
for _, cs := range conns {
|
||
for st := range cs {
|
||
- st.Close()
|
||
+ st.Close(errors.New("Server.Stop called"))
|
||
}
|
||
}
|
||
if s.opts.numServerWorkers > 0 {
|
||
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
|
||
index 01bbb2025aed..f22acace4253 100644
|
||
--- a/vendor/google.golang.org/grpc/service_config.go
|
||
+++ b/vendor/google.golang.org/grpc/service_config.go
|
||
@@ -226,7 +226,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||
var rsc jsonSC
|
||
err := json.Unmarshal([]byte(js), &rsc)
|
||
if err != nil {
|
||
- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||
+ logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
|
||
return &serviceconfig.ParseResult{Err: err}
|
||
}
|
||
sc := ServiceConfig{
|
||
@@ -254,7 +254,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||
}
|
||
d, err := parseDuration(m.Timeout)
|
||
if err != nil {
|
||
- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||
+ logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
|
||
return &serviceconfig.ParseResult{Err: err}
|
||
}
|
||
|
||
@@ -263,7 +263,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||
Timeout: d,
|
||
}
|
||
if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
|
||
- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
|
||
+ logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
|
||
return &serviceconfig.ParseResult{Err: err}
|
||
}
|
||
if m.MaxRequestMessageBytes != nil {
|
||
@@ -283,13 +283,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
|
||
for i, n := range *m.Name {
|
||
path, err := n.generatePath()
|
||
if err != nil {
|
||
- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
|
||
+ logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
|
||
return &serviceconfig.ParseResult{Err: err}
|
||
}
|
||
|
||
if _, ok := paths[path]; ok {
|
||
err = errDuplicatedName
|
||
- logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
|
||
+ logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
|
||
return &serviceconfig.ParseResult{Err: err}
|
||
}
|
||
paths[path] = struct{}{}
|
||
diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
|
||
index 73a2f926613e..35e7a20a04ba 100644
|
||
--- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
|
||
+++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
|
||
@@ -19,7 +19,7 @@
|
||
// Package serviceconfig defines types and methods for operating on gRPC
|
||
// service configs.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
|
||
index 6d163b6e3842..623be39f26ba 100644
|
||
--- a/vendor/google.golang.org/grpc/status/status.go
|
||
+++ b/vendor/google.golang.org/grpc/status/status.go
|
||
@@ -76,14 +76,14 @@ func FromProto(s *spb.Status) *Status {
|
||
|
||
// FromError returns a Status representation of err.
|
||
//
|
||
-// - If err was produced by this package or implements the method `GRPCStatus()
|
||
-// *Status`, the appropriate Status is returned.
|
||
+// - If err was produced by this package or implements the method `GRPCStatus()
|
||
+// *Status`, the appropriate Status is returned.
|
||
//
|
||
-// - If err is nil, a Status is returned with codes.OK and no message.
|
||
+// - If err is nil, a Status is returned with codes.OK and no message.
|
||
//
|
||
-// - Otherwise, err is an error not compatible with this package. In this
|
||
-// case, a Status is returned with codes.Unknown and err's Error() message,
|
||
-// and ok is false.
|
||
+// - Otherwise, err is an error not compatible with this package. In this
|
||
+// case, a Status is returned with codes.Unknown and err's Error() message,
|
||
+// and ok is false.
|
||
func FromError(err error) (s *Status, ok bool) {
|
||
if err == nil {
|
||
return nil, true
|
||
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
|
||
index 0c16cfb2ea80..93231af2ac56 100644
|
||
--- a/vendor/google.golang.org/grpc/stream.go
|
||
+++ b/vendor/google.golang.org/grpc/stream.go
|
||
@@ -39,6 +39,7 @@ import (
|
||
imetadata "google.golang.org/grpc/internal/metadata"
|
||
iresolver "google.golang.org/grpc/internal/resolver"
|
||
"google.golang.org/grpc/internal/serviceconfig"
|
||
+ istatus "google.golang.org/grpc/internal/status"
|
||
"google.golang.org/grpc/internal/transport"
|
||
"google.golang.org/grpc/metadata"
|
||
"google.golang.org/grpc/peer"
|
||
@@ -195,6 +196,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
|
||
rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
|
||
rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
|
||
if err != nil {
|
||
+ if st, ok := status.FromError(err); ok {
|
||
+ // Restrict the code to the list allowed by gRFC A54.
|
||
+ if istatus.IsRestrictedControlPlaneCode(st) {
|
||
+ err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err)
|
||
+ }
|
||
+ return nil, err
|
||
+ }
|
||
return nil, toRPCErr(err)
|
||
}
|
||
|
||
@@ -408,7 +416,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
|
||
ctx = trace.NewContext(ctx, trInfo.tr)
|
||
}
|
||
|
||
- if cs.cc.parsedTarget.Scheme == "xds" {
|
||
+ if cs.cc.parsedTarget.URL.Scheme == "xds" {
|
||
// Add extra metadata (metadata that will be added by transport) to context
|
||
// so the balancer can see them.
|
||
ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
|
||
@@ -430,7 +438,7 @@ func (a *csAttempt) getTransport() error {
|
||
cs := a.cs
|
||
|
||
var err error
|
||
- a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
|
||
+ a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
|
||
if err != nil {
|
||
if de, ok := err.(dropError); ok {
|
||
err = de.error
|
||
@@ -447,6 +455,25 @@ func (a *csAttempt) getTransport() error {
|
||
func (a *csAttempt) newStream() error {
|
||
cs := a.cs
|
||
cs.callHdr.PreviousAttempts = cs.numRetries
|
||
+
|
||
+ // Merge metadata stored in PickResult, if any, with existing call metadata.
|
||
+ // It is safe to overwrite the csAttempt's context here, since all state
|
||
+ // maintained in it are local to the attempt. When the attempt has to be
|
||
+ // retried, a new instance of csAttempt will be created.
|
||
+ if a.pickResult.Metatada != nil {
|
||
+ // We currently do not have a function it the metadata package which
|
||
+ // merges given metadata with existing metadata in a context. Existing
|
||
+ // function `AppendToOutgoingContext()` takes a variadic argument of key
|
||
+ // value pairs.
|
||
+ //
|
||
+ // TODO: Make it possible to retrieve key value pairs from metadata.MD
|
||
+ // in a form passable to AppendToOutgoingContext(), or create a version
|
||
+ // of AppendToOutgoingContext() that accepts a metadata.MD.
|
||
+ md, _ := metadata.FromOutgoingContext(a.ctx)
|
||
+ md = metadata.Join(md, a.pickResult.Metatada)
|
||
+ a.ctx = metadata.NewOutgoingContext(a.ctx, md)
|
||
+ }
|
||
+
|
||
s, err := a.t.NewStream(a.ctx, cs.callHdr)
|
||
if err != nil {
|
||
nse, ok := err.(*transport.NewStreamError)
|
||
@@ -521,12 +548,12 @@ type clientStream struct {
|
||
// csAttempt implements a single transport stream attempt within a
|
||
// clientStream.
|
||
type csAttempt struct {
|
||
- ctx context.Context
|
||
- cs *clientStream
|
||
- t transport.ClientTransport
|
||
- s *transport.Stream
|
||
- p *parser
|
||
- done func(balancer.DoneInfo)
|
||
+ ctx context.Context
|
||
+ cs *clientStream
|
||
+ t transport.ClientTransport
|
||
+ s *transport.Stream
|
||
+ p *parser
|
||
+ pickResult balancer.PickResult
|
||
|
||
finished bool
|
||
dc Decompressor
|
||
@@ -744,17 +771,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
|
||
|
||
func (cs *clientStream) Header() (metadata.MD, error) {
|
||
var m metadata.MD
|
||
+ noHeader := false
|
||
err := cs.withRetry(func(a *csAttempt) error {
|
||
var err error
|
||
m, err = a.s.Header()
|
||
+ if err == transport.ErrNoHeaders {
|
||
+ noHeader = true
|
||
+ return nil
|
||
+ }
|
||
return toRPCErr(err)
|
||
}, cs.commitAttemptLocked)
|
||
+
|
||
if err != nil {
|
||
cs.finish(err)
|
||
return nil, err
|
||
}
|
||
- if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged {
|
||
- // Only log if binary log is on and header has not been logged.
|
||
+
|
||
+ if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader {
|
||
+ // Only log if binary log is on and header has not been logged, and
|
||
+ // there is actually headers to log.
|
||
logEntry := &binarylog.ServerHeader{
|
||
OnClientSide: true,
|
||
Header: m,
|
||
@@ -1087,12 +1122,12 @@ func (a *csAttempt) finish(err error) {
|
||
tr = a.s.Trailer()
|
||
}
|
||
|
||
- if a.done != nil {
|
||
+ if a.pickResult.Done != nil {
|
||
br := false
|
||
if a.s != nil {
|
||
br = a.s.BytesReceived()
|
||
}
|
||
- a.done(balancer.DoneInfo{
|
||
+ a.pickResult.Done(balancer.DoneInfo{
|
||
Err: err,
|
||
Trailer: tr,
|
||
BytesSent: a.s != nil,
|
||
@@ -1448,6 +1483,9 @@ type ServerStream interface {
|
||
// It is safe to have a goroutine calling SendMsg and another goroutine
|
||
// calling RecvMsg on the same stream at the same time, but it is not safe
|
||
// to call SendMsg on the same stream in different goroutines.
|
||
+ //
|
||
+ // It is not safe to modify the message after calling SendMsg. Tracing
|
||
+ // libraries and stats handlers may use the message lazily.
|
||
SendMsg(m interface{}) error
|
||
// RecvMsg blocks until it receives a message into m or the stream is
|
||
// done. It returns io.EOF when the client has performed a CloseSend. On
|
||
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
|
||
index dbf34e6bb5f5..bfa5dfa40e4d 100644
|
||
--- a/vendor/google.golang.org/grpc/tap/tap.go
|
||
+++ b/vendor/google.golang.org/grpc/tap/tap.go
|
||
@@ -19,7 +19,7 @@
|
||
// Package tap defines the function handles which are executed on the transport
|
||
// layer of gRPC-Go and related information.
|
||
//
|
||
-// Experimental
|
||
+// # Experimental
|
||
//
|
||
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
|
||
// later release.
|
||
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
|
||
index d472ca64307b..fe552c315be2 100644
|
||
--- a/vendor/google.golang.org/grpc/version.go
|
||
+++ b/vendor/google.golang.org/grpc/version.go
|
||
@@ -19,4 +19,4 @@
|
||
package grpc
|
||
|
||
// Version is the current grpc version.
|
||
-const Version = "1.50.1"
|
||
+const Version = "1.53.0"
|
||
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
|
||
index c3fc8253b13a..3728aed04fc7 100644
|
||
--- a/vendor/google.golang.org/grpc/vet.sh
|
||
+++ b/vendor/google.golang.org/grpc/vet.sh
|
||
@@ -66,8 +66,21 @@ elif [[ "$#" -ne 0 ]]; then
|
||
die "Unknown argument(s): $*"
|
||
fi
|
||
|
||
+# - Check that generated proto files are up to date.
|
||
+if [[ -z "${VET_SKIP_PROTO}" ]]; then
|
||
+ PATH="/home/travis/bin:${PATH}" make proto && \
|
||
+ git status --porcelain 2>&1 | fail_on_output || \
|
||
+ (git status; git --no-pager diff; exit 1)
|
||
+fi
|
||
+
|
||
+if [[ -n "${VET_ONLY_PROTO}" ]]; then
|
||
+ exit 0
|
||
+fi
|
||
+
|
||
# - Ensure all source files contain a copyright message.
|
||
-not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go'
|
||
+# (Done in two parts because Darwin "git grep" has broken support for compound
|
||
+# exclusion matches.)
|
||
+(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output
|
||
|
||
# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
|
||
not grep 'func Test[^(]' *_test.go
|
||
@@ -81,7 +94,7 @@ not git grep -l 'x/net/context' -- "*.go"
|
||
git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'
|
||
|
||
# - Do not call grpclog directly. Use grpclog.Component instead.
|
||
-git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
|
||
+git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
|
||
|
||
# - Ensure all ptypes proto packages are renamed when importing.
|
||
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
|
||
@@ -91,13 +104,6 @@ git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.
|
||
|
||
misspell -error .
|
||
|
||
-# - Check that generated proto files are up to date.
|
||
-if [[ -z "${VET_SKIP_PROTO}" ]]; then
|
||
- PATH="/home/travis/bin:${PATH}" make proto && \
|
||
- git status --porcelain 2>&1 | fail_on_output || \
|
||
- (git status; git --no-pager diff; exit 1)
|
||
-fi
|
||
-
|
||
# - gofmt, goimports, golint (with exceptions for generated code), go vet,
|
||
# go mod tidy.
|
||
# Perform these checks on each module inside gRPC.
|
||
@@ -109,7 +115,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do
|
||
goimports -l . 2>&1 | not grep -vE "\.pb\.go"
|
||
golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"
|
||
|
||
- go mod tidy
|
||
+ go mod tidy -compat=1.17
|
||
git status --porcelain 2>&1 | fail_on_output || \
|
||
(git status; git --no-pager diff; exit 1)
|
||
popd
|
||
@@ -119,8 +125,9 @@ done
|
||
#
|
||
# TODO(dfawley): don't use deprecated functions in examples or first-party
|
||
# plugins.
|
||
+# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs.
|
||
SC_OUT="$(mktemp)"
|
||
-staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true
|
||
+staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true
|
||
# Error if anything other than deprecation warnings are printed.
|
||
not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
|
||
# Only ignore the following deprecated types/fields/functions.
|
||
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
|
||
index 00ea2fecfb79..21d5d2cb18e1 100644
|
||
--- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
|
||
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
|
||
@@ -4,7 +4,7 @@
|
||
|
||
// Package protojson marshals and unmarshals protocol buffer messages as JSON
|
||
// format. It follows the guide at
|
||
-// https://developers.google.com/protocol-buffers/docs/proto3#json.
|
||
+// https://protobuf.dev/programming-guides/proto3#json.
|
||
//
|
||
// This package produces a different output than the standard "encoding/json"
|
||
// package, which does not operate correctly on protocol buffer messages.
|
||
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
|
||
index c85f8469480a..6c37d417449a 100644
|
||
--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
|
||
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
|
||
@@ -814,16 +814,22 @@ func (d decoder) unmarshalTimestamp(m protoreflect.Message) error {
|
||
return d.unexpectedTokenError(tok)
|
||
}
|
||
|
||
- t, err := time.Parse(time.RFC3339Nano, tok.ParsedString())
|
||
+ s := tok.ParsedString()
|
||
+ t, err := time.Parse(time.RFC3339Nano, s)
|
||
if err != nil {
|
||
return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
|
||
}
|
||
- // Validate seconds. No need to validate nanos because time.Parse would have
|
||
- // covered that already.
|
||
+ // Validate seconds.
|
||
secs := t.Unix()
|
||
if secs < minTimestampSeconds || secs > maxTimestampSeconds {
|
||
return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString())
|
||
}
|
||
+ // Validate subseconds.
|
||
+ i := strings.LastIndexByte(s, '.') // start of subsecond field
|
||
+ j := strings.LastIndexAny(s, "Z-+") // start of timezone field
|
||
+ if i >= 0 && j >= i && j-i > len(".999999999") {
|
||
+ return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
|
||
+ }
|
||
|
||
fds := m.Descriptor().Fields()
|
||
fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
|
||
diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
|
||
index ce57f57ebd48..f4b4686cf9de 100644
|
||
--- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
|
||
+++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
|
||
@@ -3,7 +3,7 @@
|
||
// license that can be found in the LICENSE file.
|
||
|
||
// Package protowire parses and formats the raw wire encoding.
|
||
-// See https://developers.google.com/protocol-buffers/docs/encoding.
|
||
+// See https://protobuf.dev/programming-guides/encoding.
|
||
//
|
||
// For marshaling and unmarshaling entire protobuf messages,
|
||
// use the "google.golang.org/protobuf/proto" package instead.
|
||
@@ -29,12 +29,8 @@ const (
|
||
)
|
||
|
||
// IsValid reports whether the field number is semantically valid.
|
||
-//
|
||
-// Note that while numbers within the reserved range are semantically invalid,
|
||
-// they are syntactically valid in the wire format.
|
||
-// Implementations may treat records with reserved field numbers as unknown.
|
||
func (n Number) IsValid() bool {
|
||
- return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber
|
||
+ return MinValidNumber <= n && n <= MaxValidNumber
|
||
}
|
||
|
||
// Type represents the wire type.
|
||
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
|
||
index b13fd29e81e6..d043a6ebe0b9 100644
|
||
--- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
|
||
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
|
||
@@ -294,7 +294,7 @@ func (d *Decoder) isValueNext() bool {
|
||
}
|
||
|
||
// consumeToken constructs a Token for given Kind with raw value derived from
|
||
-// current d.in and given size, and consumes the given size-lenght of it.
|
||
+// current d.in and given size, and consumes the given size-length of it.
|
||
func (d *Decoder) consumeToken(kind Kind, size int) Token {
|
||
tok := Token{
|
||
kind: kind,
|
||
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
|
||
index 427c62d037fc..87853e786d0d 100644
|
||
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
|
||
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
|
||
@@ -412,12 +412,13 @@ func (d *Decoder) parseFieldName() (tok Token, err error) {
|
||
// Field number. Identify if input is a valid number that is not negative
|
||
// and is decimal integer within 32-bit range.
|
||
if num := parseNumber(d.in); num.size > 0 {
|
||
+ str := num.string(d.in)
|
||
if !num.neg && num.kind == numDec {
|
||
- if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil {
|
||
+ if _, err := strconv.ParseInt(str, 10, 32); err == nil {
|
||
return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil
|
||
}
|
||
}
|
||
- return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size])
|
||
+ return Token{}, d.newSyntaxError("invalid field number: %s", str)
|
||
}
|
||
|
||
return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in))
|
||
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
|
||
index 81a5d8c86139..45c81f0298e2 100644
|
||
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
|
||
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
|
||
@@ -15,17 +15,12 @@ func (d *Decoder) parseNumberValue() (Token, bool) {
|
||
if num.neg {
|
||
numAttrs |= isNegative
|
||
}
|
||
- strSize := num.size
|
||
- last := num.size - 1
|
||
- if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') {
|
||
- strSize = last
|
||
- }
|
||
tok := Token{
|
||
kind: Scalar,
|
||
attrs: numberValue,
|
||
pos: len(d.orig) - len(d.in),
|
||
raw: d.in[:num.size],
|
||
- str: string(d.in[:strSize]),
|
||
+ str: num.string(d.in),
|
||
numAttrs: numAttrs,
|
||
}
|
||
d.consume(num.size)
|
||
@@ -46,6 +41,27 @@ type number struct {
|
||
kind uint8
|
||
neg bool
|
||
size int
|
||
+ // if neg, this is the length of whitespace and comments between
|
||
+ // the minus sign and the rest fo the number literal
|
||
+ sep int
|
||
+}
|
||
+
|
||
+func (num number) string(data []byte) string {
|
||
+ strSize := num.size
|
||
+ last := num.size - 1
|
||
+ if num.kind == numFloat && (data[last] == 'f' || data[last] == 'F') {
|
||
+ strSize = last
|
||
+ }
|
||
+ if num.neg && num.sep > 0 {
|
||
+ // strip whitespace/comments between negative sign and the rest
|
||
+ strLen := strSize - num.sep
|
||
+ str := make([]byte, strLen)
|
||
+ str[0] = data[0]
|
||
+ copy(str[1:], data[num.sep+1:strSize])
|
||
+ return string(str)
|
||
+ }
|
||
+ return string(data[:strSize])
|
||
+
|
||
}
|
||
|
||
// parseNumber constructs a number object from given input. It allows for the
|
||
@@ -67,19 +83,22 @@ func parseNumber(input []byte) number {
|
||
}
|
||
|
||
// Optional -
|
||
+ var sep int
|
||
if s[0] == '-' {
|
||
neg = true
|
||
s = s[1:]
|
||
size++
|
||
+ // Consume any whitespace or comments between the
|
||
+ // negative sign and the rest of the number
|
||
+ lenBefore := len(s)
|
||
+ s = consume(s, 0)
|
||
+ sep = lenBefore - len(s)
|
||
+ size += sep
|
||
if len(s) == 0 {
|
||
return number{}
|
||
}
|
||
}
|
||
|
||
- // C++ allows for whitespace and comments in between the negative sign and
|
||
- // the rest of the number. This logic currently does not but is consistent
|
||
- // with v1.
|
||
-
|
||
switch {
|
||
case s[0] == '0':
|
||
if len(s) > 1 {
|
||
@@ -116,7 +135,7 @@ func parseNumber(input []byte) number {
|
||
if len(s) > 0 && !isDelim(s[0]) {
|
||
return number{}
|
||
}
|
||
- return number{kind: kind, neg: neg, size: size}
|
||
+ return number{kind: kind, neg: neg, size: size, sep: sep}
|
||
}
|
||
}
|
||
s = s[1:]
|
||
@@ -188,5 +207,5 @@ func parseNumber(input []byte) number {
|
||
return number{}
|
||
}
|
||
|
||
- return number{kind: kind, neg: neg, size: size}
|
||
+ return number{kind: kind, neg: neg, size: size, sep: sep}
|
||
}
|
||
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
|
||
index e3cdf1c20591..5c0e8f73f4e4 100644
|
||
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
|
||
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
|
||
@@ -50,6 +50,7 @@ const (
|
||
FileDescriptorProto_Options_field_name protoreflect.Name = "options"
|
||
FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info"
|
||
FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax"
|
||
+ FileDescriptorProto_Edition_field_name protoreflect.Name = "edition"
|
||
|
||
FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name"
|
||
FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package"
|
||
@@ -63,6 +64,7 @@ const (
|
||
FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options"
|
||
FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info"
|
||
FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax"
|
||
+ FileDescriptorProto_Edition_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.edition"
|
||
)
|
||
|
||
// Field numbers for google.protobuf.FileDescriptorProto.
|
||
@@ -79,6 +81,7 @@ const (
|
||
FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8
|
||
FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9
|
||
FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12
|
||
+ FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13
|
||
)
|
||
|
||
// Names for google.protobuf.DescriptorProto.
|
||
@@ -494,26 +497,29 @@ const (
|
||
|
||
// Field names for google.protobuf.MessageOptions.
|
||
const (
|
||
- MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format"
|
||
- MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor"
|
||
- MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated"
|
||
- MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry"
|
||
- MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
|
||
+ MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format"
|
||
+ MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor"
|
||
+ MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated"
|
||
+ MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry"
|
||
+ MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts"
|
||
+ MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
|
||
|
||
- MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format"
|
||
- MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor"
|
||
- MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated"
|
||
- MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry"
|
||
- MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option"
|
||
+ MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format"
|
||
+ MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor"
|
||
+ MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated"
|
||
+ MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry"
|
||
+ MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts"
|
||
+ MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option"
|
||
)
|
||
|
||
// Field numbers for google.protobuf.MessageOptions.
|
||
const (
|
||
- MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1
|
||
- MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2
|
||
- MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3
|
||
- MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7
|
||
- MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
|
||
+ MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1
|
||
+ MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2
|
||
+ MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3
|
||
+ MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7
|
||