commit e989f0a25f571c256e2bae90ec8fe8146bcef05f Author: Lucas Serven Date: Fri Jan 18 02:50:10 2019 +0100 init diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c56943e --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +.cache/ +.container* +.push* +bin/ diff --git a/.header b/.header new file mode 100644 index 0000000..8de4725 --- /dev/null +++ b/.header @@ -0,0 +1,13 @@ +// Copyright YEAR the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..69969b2 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,22 @@ +sudo: required + +language: go + +services: + - docker + +go: + - 1.11.1 + +before_install: + - go get -u golang.org/x/lint/golint + +script: + - make + - make unit + - make lint + - make container + +after_success: + - docker login -u="$DOCKER_USERNAME" -p="$DOCKER_PASSWORD" + - make push && make push-latest diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..a4b9091 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,6 @@ +FROM alpine +MAINTAINER squat +RUN echo "@testing http://nl.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \ + apk add --no-cache ipset iptables wireguard-tools@testing +COPY bin/kg /opt/bin/ +ENTRYPOINT ["/opt/bin/kg"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..ecc1b6d --- /dev/null +++ b/Makefile @@ -0,0 +1,130 @@ +.PHONY: all push container clean container-name container-latest push-latest fmt lint test unit vendor header + +BINS := $(addprefix bin/,kg kgctl) +PROJECT := kilo +PKG := github.com/squat/$(PROJECT) +REGISTRY ?= index.docker.io +IMAGE ?= squat/$(PROJECT) + +TAG := $(shell git describe --abbrev=0 --tags HEAD 2>/dev/null) +COMMIT := $(shell git rev-parse HEAD) +VERSION := $(COMMIT) +ifneq ($(TAG),) + ifeq ($(COMMIT), $(shell git rev-list -n1 $(TAG))) + VERSION := $(TAG) + endif +endif +DIRTY := $(shell test -z "$$(git diff --shortstat 2>/dev/null)" || echo -dirty) +VERSION := $(VERSION)$(DIRTY) +LD_FLAGS := -ldflags '-X $(PKG)/pkg/version.Version=$(VERSION)' +SRC := $(shell find . -type f -name '*.go' -not -path "./vendor/*") +GO_FILES ?= $$(find . -name '*.go' -not -path './vendor/*') +GO_PKGS ?= $$(go list ./... 
| grep -v "$(PKG)/vendor") + +BUILD_IMAGE ?= golang:1.11.1-alpine + +all: build + +build: $(BINS) + +$(BINS): $(SRC) go.mod + @mkdir -p bin + @echo "building: $@" + @docker run --rm \ + -u $$(id -u):$$(id -g) \ + -v $$(pwd):/$(PROJECT) \ + -w /$(PROJECT) \ + $(BUILD_IMAGE) \ + /bin/sh -c " \ + rm -rf ./.cache && \ + GOOS=linux \ + GOCACHE=./.cache \ + CGO_ENABLED=0 \ + go build -mod=vendor -o $@ \ + $(LD_FLAGS) \ + ./cmd/$(@F)/... \ + " + +fmt: + @echo $(GO_PKGS) + gofmt -w -s $(GO_FILES) + +lint: header + @echo 'go vet $(GO_PKGS)' + @vet_res=$$(go vet $(GO_PKGS) 2>&1); if [ -n "$$vet_res" ]; then \ + echo ""; \ + echo "Go vet found issues. Please check the reported issues"; \ + echo "and fix them if necessary before submitting the code for review:"; \ + echo "$$vet_res"; \ + exit 1; \ + fi + @echo 'golint $(GO_PKGS)' + @lint_res=$$(golint $(GO_PKGS)); if [ -n "$$lint_res" ]; then \ + echo ""; \ + echo "Golint found style issues. Please check the reported issues"; \ + echo "and fix them if necessary before submitting the code for review:"; \ + echo "$$lint_res"; \ + exit 1; \ + fi + @echo 'gofmt -d -s $(GO_FILES)' + @fmt_res=$$(gofmt -d -s $(GO_FILES)); if [ -n "$$fmt_res" ]; then \ + echo ""; \ + echo "Gofmt found style issues. Please check the reported issues"; \ + echo "and fix them if necessary before submitting the code for review:"; \ + echo "$$fmt_res"; \ + exit 1; \ + fi + +unit: + go test --race ./... + +test: lint unit + +header: .header + @HEADER=$$(sed "s/YEAR/$$(date '+%Y')/" .header); \ + FILES=; \ + for f in $(GO_FILES); do \ + FILE=$$(head -n $$(wc -l .header | awk '{print $$1}') $$f); \ + [ "$$FILE" != "$$HEADER" ] && FILES="$$FILES$$f "; \ + done; \ + if [ -n "$$FILES" ]; then \ + printf 'the following files are missing the license header: %s\n' "$$FILES"; \ + exit 1; \ + fi + +container: .container-$(VERSION) container-name +.container-$(VERSION): $(BINS) Dockerfile + @docker build -t $(IMAGE):$(VERSION) . + @docker images -q $(IMAGE):$(VERSION) > $@ + +container-latest: .container-$(VERSION) + @docker tag $(IMAGE):$(VERSION) $(IMAGE):latest + @echo "container: $(IMAGE):latest" + +container-name: + @echo "container: $(IMAGE):$(VERSION)" + +push: .push-$(VERSION) push-name +.push-$(VERSION): .container-$(VERSION) + @docker push $(REGISTRY)/$(IMAGE):$(VERSION) + @docker images -q $(IMAGE):$(VERSION) > $@ + +push-latest: container-latest + @docker push $(REGISTRY)/$(IMAGE):latest + @echo "pushed: $(IMAGE):latest" + +push-name: + @echo "pushed: $(IMAGE):$(VERSION)" + +clean: container-clean bin-clean + rm -r .cache + +container-clean: + rm -rf .container-* .push-* + +bin-clean: + rm -rf bin + +vendor: + go mod tidy + go mod vendor diff --git a/README.md b/README.md new file mode 100644 index 0000000..7e1f59e --- /dev/null +++ b/README.md @@ -0,0 +1,88 @@ +
+ +# Kilo + +Kilo is a multi-cloud network overlay built on WireGuard and designed for Kubernetes. + +[![Build Status](https://travis-ci.org/squat/kilo.svg?branch=master)](https://travis-ci.org/squat/kilo) +[![Go Report Card](https://goreportcard.com/badge/github.com/squat/kilo)](https://goreportcard.com/report/github.com/squat/kilo) + +## Overview + +Kilo connects nodes in a cluster by providing an encrypted layer 3 network that can span across data centers and public clouds. +By allowing pools of nodes in different locations to communicate securely, Kilo enables the operation of multi-cloud clusters. + +## How it works + +Kilo uses [WireGuard](https://www.wireguard.com/), a performant and secure VPN, to create a mesh between the different logical locations in a cluster. +The Kilo agent, `kg`, runs on every node in the cluster, setting up the public and private keys for the VPN as well as the necessary rules to route packets between locations. + +Kilo can operate as an add-on complimenting the cluster-networking solution currently installed on a cluster. +This means that if a cluster uses, for example, Calico for networking, Kilo can be installed on top to enable pools of nodes in different locations to join; Kilo will take care of the network between locations, while Calico will take care of the network within locations. + +## Installing on Kubernetes + +Kilo can be installed on any Kubernetes cluster either pre- or post-bring-up. + +### Step 1: install WireGuard + +Kilo requires the WireGuard kernel module on all nodes in the cluster. +For most Linux distributions, this can be installed using the system package manager. +For Container Linux, WireGuard can be easily installed using a DaemonSet: + +```shell +kubectl apply -f https://raw.githubusercontent.com/squat/modulus/master/wireguard/daemonset.yaml +``` + +### Step 2: open WireGuard port + +The nodes in the mesh will require an open UDP port in order to communicate. +By default, Kilo uses UDP port 51820. + +### Step 3: specify locations + +Kilo needs to know which nodes are in each location. +If the cluster does not automatically set the [failure-domain.beta.kubernetes.io/region](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domain-beta-kubernetes-io-region) node label, then the [kilo.squat.ai/location](./docs/annotations.md#location) annotation can be used. +For example, the following snippet could be used to annotate all nodes with `GCP` in the name: + +```shell +for node in $(kubectl get nodes | grep -i gcp | awk '{print $1}'); do kubectl annotate node $node kilo.squat.ai/location="gcp"; done +``` + +### Step 4: ensure nodes have public IP + +At least one node in each location must have a public IP address. +If the public IP address is not automatically configured on the node's Ethernet device, it can be manually specified using the [kilo.squat.ai/force-external-ip](./docs/annotations.md#force-external-ip) annotation. + +### Step 5: install Kilo! + +Kilo can be installed by deploying a DaemonSet to the cluster. 
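+Once one of the manifests below has been applied, the rollout can be checked with something like the following (a sketch; the DaemonSet name `kilo` and the `kube-system` namespace come from the manifests in this repository):
+
+```shell
+kubectl -n kube-system rollout status daemonset kilo
+```
+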
+ +To run Kilo on kubeadm: + +```shell +kubectl apply -f https://raw.githubusercontent.com/squat/kilo/master/manifests/kilo-kubeadm.yaml +``` + +To run Kilo on bootkube: + +```shell +kubectl apply -f https://raw.githubusercontent.com/squat/kilo/master/manifests/kilo-bootkube.yaml +``` + +To run Kilo on Typhoon: + +```shell +kubectl apply -f https://raw.githubusercontent.com/squat/kilo/master/manifests/kilo-typhoon.yaml +``` + +## Analysis + +The topology of a Kilo network can be analyzed using the `kgctl` binary. +For example, the `graph` command can be used to generate a graph of the network in Graphviz format: + +```shell +kgctl graph --kubeconfig=$KUBECONFIG | twopi -Tsvg > cluster.svg +``` + + diff --git a/cluster.svg b/cluster.svg new file mode 100644 index 0000000..0bdc476 --- /dev/null +++ b/cluster.svg @@ -0,0 +1,217 @@ + + + + + + +kilo + +10.4.0.0/16 + + +ip-10-0-1-81 + +aws +ip-10-0-1-81 +10.2.1.0/24 +10.0.1.81 +10.4.0.1 + + + +ip-10-0-18-139 + +aws +ip-10-0-18-139 +10.2.2.0/24 +10.0.18.139 + + + +ip-10-0-1-81->ip-10-0-18-139 + + + + + + +ip-10-0-25-19 + +aws +ip-10-0-25-19 +10.2.0.0/24 +10.0.25.19 + + + +ip-10-0-1-81->ip-10-0-25-19 + + + + + + +ip-10-0-30-70 + +aws +ip-10-0-30-70 +10.2.4.0/24 +10.0.30.70 + + + +ip-10-0-1-81->ip-10-0-30-70 + + + + + + +ip-10-0-37-193 + +aws +ip-10-0-37-193 +10.2.3.0/24 +10.0.37.193 + + + +ip-10-0-1-81->ip-10-0-37-193 + + + + + + +ip-10-0-4-141 + +aws +ip-10-0-4-141 +10.2.7.0/24 +10.0.4.141 + + + +ip-10-0-1-81->ip-10-0-4-141 + + + + + + +ip-10-0-4-62 + +aws +ip-10-0-4-62 +10.2.5.0/24 +10.0.4.62 + + + +ip-10-0-1-81->ip-10-0-4-62 + + + + + + +ip-10-0-47-198 + +aws +ip-10-0-47-198 +10.2.6.0/24 +10.0.47.198 + + + +ip-10-0-1-81->ip-10-0-47-198 + + + + + + +kilo-gcp-worker0.squat.ai + +gcp +kilo-gcp-worker0.squat.ai +10.2.12.0/24 +10.1.96.8 +10.4.0.2 + + + +ip-10-0-1-81->kilo-gcp-worker0.squat.ai + + + + + + +kilo-gcp-worker1.squat.ai + +gcp +kilo-gcp-worker1.squat.ai +10.2.8.0/24 +10.1.96.7 + + + +kilo-gcp-worker0.squat.ai->kilo-gcp-worker1.squat.ai + + + + + + +kilo-gcp-worker2.squat.ai + +gcp +kilo-gcp-worker2.squat.ai +10.2.10.0/24 +10.1.96.4 + + + +kilo-gcp-worker0.squat.ai->kilo-gcp-worker2.squat.ai + + + + + + +kilo-gcp-worker3.squat.ai + +gcp +kilo-gcp-worker3.squat.ai +10.2.9.0/24 +10.1.96.5 + + + +kilo-gcp-worker0.squat.ai->kilo-gcp-worker3.squat.ai + + + + + + +kilo-gcp-worker4.squat.ai + +gcp +kilo-gcp-worker4.squat.ai +10.2.11.0/24 +10.1.96.6 + + + +kilo-gcp-worker0.squat.ai->kilo-gcp-worker4.squat.ai + + + + + + diff --git a/cmd/kg/main.go b/cmd/kg/main.go new file mode 100644 index 0000000..1d7d0ef --- /dev/null +++ b/cmd/kg/main.go @@ -0,0 +1,232 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "errors" + "flag" + "fmt" + "net" + "net/http" + "os" + "os/signal" + "strings" + "syscall" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/oklog/run" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + + "github.com/squat/kilo/pkg/k8s" + "github.com/squat/kilo/pkg/mesh" + "github.com/squat/kilo/pkg/version" +) + +const ( + logLevelAll = "all" + logLevelDebug = "debug" + logLevelInfo = "info" + logLevelWarn = "warn" + logLevelError = "error" + logLevelNone = "none" +) + +var ( + availableBackends = strings.Join([]string{ + k8s.Backend, + }, ", ") + availableEncapsulations = strings.Join([]string{ + string(mesh.NeverEncapsulate), + string(mesh.CrossSubnetEncapsulate), + string(mesh.AlwaysEncapsulate), + }, ", ") + availableGranularities = strings.Join([]string{ + string(mesh.DataCenterGranularity), + string(mesh.NodeGranularity), + }, ", ") + availableLogLevels = strings.Join([]string{ + logLevelAll, + logLevelDebug, + logLevelInfo, + logLevelWarn, + logLevelError, + logLevelNone, + }, ", ") +) + +// Main is the principal function for the binary, wrapped only by `main` for convenience. +func Main() error { + backend := flag.String("backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends)) + encapsulate := flag.String("encapsulate", string(mesh.AlwaysEncapsulate), fmt.Sprintf("When should Kilo encapsulate packets within a location. Possible values: %s", availableEncapsulations)) + granularity := flag.String("mesh-granularity", string(mesh.DataCenterGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities)) + kubeconfig := flag.String("kubeconfig", "", "Path to kubeconfig.") + hostname := flag.String("hostname", "", "Hostname of the node on which this process is running.") + listen := flag.String("listen", "localhost:1107", "The address at which to listen for health and metrics.") + local := flag.Bool("local", true, "Should Kilo manage routes within a location.") + logLevel := flag.String("log-level", logLevelInfo, fmt.Sprintf("Log level to use. 
Possible values: %s", availableLogLevels)) + master := flag.String("master", "", "The address of the Kubernetes API server (overrides any value in kubeconfig).") + port := flag.Int("port", 51820, "The port over which WireGuard peers should communicate.") + subnet := flag.String("subnet", "10.4.0.0/16", "CIDR from which to allocate addressees to WireGuard interfaces.") + printVersion := flag.Bool("version", false, "Print version and exit") + flag.Parse() + + if *printVersion { + fmt.Println(version.Version) + return nil + } + + _, s, err := net.ParseCIDR(*subnet) + if err != nil { + return fmt.Errorf("failed to parse %q as CIDR: %v", *subnet, err) + } + + if *hostname == "" { + var err error + *hostname, err = os.Hostname() + if *hostname == "" || err != nil { + return errors.New("failed to determine hostname") + } + } + + logger := log.NewJSONLogger(log.NewSyncWriter(os.Stdout)) + switch *logLevel { + case logLevelAll: + logger = level.NewFilter(logger, level.AllowAll()) + case logLevelDebug: + logger = level.NewFilter(logger, level.AllowDebug()) + case logLevelInfo: + logger = level.NewFilter(logger, level.AllowInfo()) + case logLevelWarn: + logger = level.NewFilter(logger, level.AllowWarn()) + case logLevelError: + logger = level.NewFilter(logger, level.AllowError()) + case logLevelNone: + logger = level.NewFilter(logger, level.AllowNone()) + default: + return fmt.Errorf("log level %v unknown; posible values are: %s", *logLevel, availableLogLevels) + } + logger = log.With(logger, "ts", log.DefaultTimestampUTC) + logger = log.With(logger, "caller", log.DefaultCaller) + + e := mesh.Encapsulate(*encapsulate) + switch e { + case mesh.NeverEncapsulate: + case mesh.CrossSubnetEncapsulate: + case mesh.AlwaysEncapsulate: + default: + return fmt.Errorf("encapsulation %v unknown; posible values are: %s", *encapsulate, availableEncapsulations) + } + + gr := mesh.Granularity(*granularity) + switch gr { + case mesh.DataCenterGranularity: + case mesh.NodeGranularity: + default: + return fmt.Errorf("mesh granularity %v unknown; posible values are: %s", *granularity, availableGranularities) + } + + var b mesh.Backend + switch *backend { + case k8s.Backend: + config, err := clientcmd.BuildConfigFromFlags(*master, *kubeconfig) + if err != nil { + return fmt.Errorf("failed to create Kubernetes config: %v", err) + } + client := kubernetes.NewForConfigOrDie(config) + b = k8s.New(client) + default: + return fmt.Errorf("backend %v unknown; posible values are: %s", *backend, availableBackends) + } + + m, err := mesh.New(b, e, gr, *hostname, *port, s, *local, log.With(logger, "component", "kilo")) + if err != nil { + return fmt.Errorf("failed to create Kilo mesh: %v", err) + } + + r := prometheus.NewRegistry() + r.MustRegister( + prometheus.NewGoCollector(), + prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), + ) + m.RegisterMetrics(r) + + var g run.Group + { + // Run the HTTP server. + mux := http.NewServeMux() + mux.HandleFunc("/health", func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + }) + mux.Handle("/metrics", promhttp.HandlerFor(r, promhttp.HandlerOpts{})) + l, err := net.Listen("tcp", *listen) + if err != nil { + return fmt.Errorf("failed to listen on %s: %v", *listen, err) + } + + g.Add(func() error { + if err := http.Serve(l, mux); err != nil && err != http.ErrServerClosed { + return fmt.Errorf("error: server exited unexpectedly: %v", err) + } + return nil + }, func(error) { + l.Close() + }) + } + + { + // Start the mesh. 
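+		// The mesh runs here and is stopped via m.Stop in the interrupt function below.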
+ g.Add(func() error { + logger.Log("msg", fmt.Sprintf("Starting Kilo network mesh '%v'.", version.Version)) + if err := m.Run(); err != nil { + return fmt.Errorf("error: Kilo exited unexpectedly: %v", err) + } + return nil + }, func(error) { + m.Stop() + }) + } + { + // Exit gracefully on SIGINT and SIGTERM. + term := make(chan os.Signal, 1) + signal.Notify(term, syscall.SIGINT, syscall.SIGTERM) + cancel := make(chan struct{}) + g.Add(func() error { + for { + select { + case <-term: + logger.Log("msg", "caught interrupt; gracefully cleaning up; see you next time!") + return nil + case <-cancel: + return nil + } + } + }, func(error) { + close(cancel) + }) + } + + return g.Run() +} + +func main() { + if err := Main(); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } +} diff --git a/cmd/kgctl/graph.go b/cmd/kgctl/graph.go new file mode 100644 index 0000000..fec272a --- /dev/null +++ b/cmd/kgctl/graph.go @@ -0,0 +1,58 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "fmt" + + "github.com/spf13/cobra" + "github.com/squat/kilo/pkg/mesh" +) + +func newGraph() *cobra.Command { + return &cobra.Command{ + Use: "graph", + Short: "Generates a graph of the Kilo network", + Long: "", + RunE: runGraph, + } +} + +func runGraph(_ *cobra.Command, _ []string) error { + ns, err := opts.backend.List() + if err != nil { + return fmt.Errorf("failed to list nodes: %v", err) + } + var hostname string + if len(ns) != 0 { + hostname = ns[0].Name + } + nodes := make(map[string]*mesh.Node) + for _, n := range ns { + if n.Ready() { + nodes[n.Name] = n + } + } + t, err := mesh.NewTopology(nodes, opts.granularity, hostname, 0, []byte{}, opts.subnet) + if err != nil { + return fmt.Errorf("failed to create topology: %v", err) + } + g, err := t.Dot() + if err != nil { + return fmt.Errorf("failed to generate graph: %v", err) + } + fmt.Println(g) + return nil +} diff --git a/cmd/kgctl/main.go b/cmd/kgctl/main.go new file mode 100644 index 0000000..413740d --- /dev/null +++ b/cmd/kgctl/main.go @@ -0,0 +1,124 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
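+// Command kgctl is a command-line utility for inspecting a Kilo network,
+// for example generating a Graphviz graph of the mesh topology.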
+ +package main + +import ( + "fmt" + "net" + "os" + "strings" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + + "github.com/spf13/cobra" + "github.com/squat/kilo/pkg/k8s" + "github.com/squat/kilo/pkg/mesh" + "github.com/squat/kilo/pkg/version" +) + +const ( + logLevelAll = "all" + logLevelDebug = "debug" + logLevelInfo = "info" + logLevelWarn = "warn" + logLevelError = "error" + logLevelNone = "none" +) + +var ( + availableBackends = strings.Join([]string{ + k8s.Backend, + }, ", ") + availableGranularities = strings.Join([]string{ + string(mesh.DataCenterGranularity), + string(mesh.NodeGranularity), + }, ", ") + availableLogLevels = strings.Join([]string{ + logLevelAll, + logLevelDebug, + logLevelInfo, + logLevelWarn, + logLevelError, + logLevelNone, + }, ", ") + opts struct { + backend mesh.Backend + granularity mesh.Granularity + subnet *net.IPNet + } + backend string + granularity string + kubeconfig string + subnet string +) + +func runRoot(_ *cobra.Command, _ []string) error { + _, s, err := net.ParseCIDR(subnet) + if err != nil { + return fmt.Errorf("failed to parse %q as CIDR: %v", subnet, err) + } + opts.subnet = s + + opts.granularity = mesh.Granularity(granularity) + switch opts.granularity { + case mesh.DataCenterGranularity: + case mesh.NodeGranularity: + default: + return fmt.Errorf("mesh granularity %v unknown; posible values are: %s", granularity, availableGranularities) + } + + switch backend { + case k8s.Backend: + config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return fmt.Errorf("failed to create Kubernetes config: %v", err) + } + client := kubernetes.NewForConfigOrDie(config) + opts.backend = k8s.New(client) + default: + return fmt.Errorf("backend %v unknown; posible values are: %s", backend, availableBackends) + } + + if err := opts.backend.Init(make(chan struct{})); err != nil { + return fmt.Errorf("failed to initialize backend: %v", err) + } + return nil +} + +func main() { + cmd := &cobra.Command{ + Use: "kgctl", + Short: "Manage a Kilo network", + Long: "", + PersistentPreRunE: runRoot, + Version: version.Version, + } + cmd.PersistentFlags().StringVar(&backend, "backend", k8s.Backend, fmt.Sprintf("The backend for the mesh. Possible values: %s", availableBackends)) + cmd.PersistentFlags().StringVar(&granularity, "mesh-granularity", string(mesh.DataCenterGranularity), fmt.Sprintf("The granularity of the network mesh to create. Possible values: %s", availableGranularities)) + cmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", "", "Path to kubeconfig.") + cmd.PersistentFlags().StringVar(&subnet, "subnet", "10.4.0.0/16", "CIDR from which to allocate addressees to WireGuard interfaces.") + + for _, subCmd := range []*cobra.Command{ + newGraph(), + } { + cmd.AddCommand(subCmd) + } + + if err := cmd.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } +} diff --git a/docs/annotations.md b/docs/annotations.md new file mode 100644 index 0000000..2b3b2b5 --- /dev/null +++ b/docs/annotations.md @@ -0,0 +1,33 @@ +# Annotations +The following annotations can be added to any Kubernetes Node object to configure the Kilo network. 
+ +|Name|type|example| +|----|----|-------| +|[kilo.squat.ai/force-external-ip](#force-external-ip)|CIDR|`"55.55.55.55/32"`| +|[kilo.squat.ai/leader](#leader)|string|`""`| +|[kilo.squat.ai/location](#location)|string|`"gcp-east"`| + +### force-external-ip +Kilo requires at least one node in each location to have a publicly accessible IP address in order to create links to other locations. +The Kilo agent running on each node will use heuristics to automatically detect an external IP address for the node; however, in some circumstances it may be necessary to explicitly configure the IP address, for example: + * _no automatic public IP on ethernet device_: on some cloud providers it is common for nodes to be allocated a public IP address but for the Ethernet devices to only be automatically configured with the private network address; in this case the allocated public IP address should be specified; + * _multiple public IP addresses_: if a node has multiple public IPs but one is preferred, then the preferred IP address should be specified; + * _IPv6_: if a node has both public IPv4 and IPv6 addresses and the Kilo network should operate over IPv6, then the IPv6 address should be specified; + +### leader +By default, Kilo creates a network mesh at the data-center granularity. +This means that one leader node is selected from each location to be an edge server and act as the gateway to other locations; the network topology will be a full mesh between leaders. +Kilo automatically selects the leader for each location in a stable and deterministic manner to avoid churn in the network configuration, while giving preference to nodes that are known to have public IP addresses. +In some situations it may be desirable to manually select the leader for a location, for example: + * _firewall_: Kilo requires an open UDP port, which defaults to 51820, to communicate between locations; if only one node is configured to have that port open, then that node should be given the leader annotation; + * _bandwidth_: if certain nodes in the cluster have a higher bandwidth or lower latency Internet connection, then those nodes should be given the leader annotation; + +_Note_: multiple nodes within a single location can be given the leader annotation; in this case, Kilo will select one leader from the set of annotated nodes. + +### location +Kilo allows nodes in different logical or physical locations to route packets to one-another. +In order to know what connections to create, Kilo needs to know which nodes are in each location. +Kilo will try to infer each node's location from the [failure-domain.beta.kubernetes.io/region](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domain-beta-kubernetes-io-region) node label. +If the label is not present for a node, for example if running a bare-metal cluster or on an unsupported cloud provider, then the location annotation should be specified. + +_Note_: all nodes without a defined location will be considered to be in the default location `""`. 
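+
+For reference, these annotations can be applied with `kubectl annotate`; a minimal sketch, where the node names and values are placeholders:
+
+```shell
+kubectl annotate node <node-name> kilo.squat.ai/location="gcp-east"
+kubectl annotate node <gateway-node> kilo.squat.ai/leader=""
+kubectl annotate node <gateway-node> kilo.squat.ai/force-external-ip="55.55.55.55/32"
+```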
diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..ee74c1c --- /dev/null +++ b/go.mod @@ -0,0 +1,61 @@ +module github.com/squat/kilo + +require ( + github.com/awalterschulze/gographviz v0.0.0-20181013152038-b2885df04310 + github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect + github.com/coreos/go-iptables v0.4.0 + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-kit/kit v0.8.0 + github.com/go-logfmt/logfmt v0.4.0 // indirect + github.com/go-stack/stack v1.8.0 // indirect + github.com/gogo/protobuf v1.2.0 // indirect + github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff // indirect + github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c // indirect + github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect + github.com/googleapis/gnostic v0.2.0 // indirect + github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect + github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f // indirect + github.com/hashicorp/golang-lru v0.5.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/json-iterator/go v1.1.5 // indirect + github.com/jtolds/gls v4.2.1+incompatible // indirect + github.com/kr/pretty v0.1.0 // indirect + github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/oklog/run v1.0.0 + github.com/onsi/ginkgo v1.7.0 // indirect + github.com/onsi/gomega v1.4.3 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v0.9.1 + github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect + github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 // indirect + github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a // indirect + github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect + github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c // indirect + github.com/spf13/cobra v0.0.3 + github.com/spf13/pflag v1.0.3 // indirect + github.com/stretchr/testify v1.2.2 // indirect + github.com/vishvananda/netlink v1.0.0 + github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect + golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 // indirect + golang.org/x/net v0.0.0-20181217023233-e147a9138326 // indirect + golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890 // indirect + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f // indirect + golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06 + golang.org/x/time v0.0.0-20181108054448-85acf8d2951c // indirect + google.golang.org/appengine v1.3.0 // indirect + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.41.0 + gopkg.in/yaml.v2 v2.2.2 // indirect + k8s.io/api v0.0.0-20181130031204-d04500c8c3dd + k8s.io/apimachinery v0.0.0-20181215012845-4d029f033399 + k8s.io/client-go v10.0.0+incompatible + k8s.io/klog v0.1.0 // indirect + k8s.io/kube-openapi v0.0.0-20181114233023-0317810137be // indirect + sigs.k8s.io/yaml v1.1.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..ce19cdb --- /dev/null +++ b/go.sum @@ -0,0 +1,139 @@ 
+github.com/awalterschulze/gographviz v0.0.0-20181013152038-b2885df04310 h1:t+qxRrRtwNiUYA+Xh2jSXhoG2grnMCMKX4Fg6lx9X1U= +github.com/awalterschulze/gographviz v0.0.0-20181013152038-b2885df04310/go.mod h1:GEV5wmg4YquNw7v1kkyoX9etIk8yVmXj+AkDHuuETHs= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/coreos/go-iptables v0.4.0 h1:wh4UbVs8DhLUbpyq97GLJDKrQMjEDD63T1xE4CrsKzQ= +github.com/coreos/go-iptables v0.4.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff h1:kOkM9whyQYodu09SJ6W3NCsHG7crFaJILQ22Gozp3lg= +github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg= +github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f h1:ShTPMJQes6tubcjzGMODIVG5hlrCeImaBnZzKF2N8SM= +github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 
+github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model 
v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c h1:Ho+uVpkel/udgjbwB5Lktg9BtvJSh2DT0Hi6LPSyI2w= +github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM= +github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 h1:mKdxBk7AujPs8kU4m80U72y/zjbZ3UcXC7dClwKbUI0= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181217023233-e147a9138326 h1:iCzOf0xz39Tstp+Tu/WwyGjUXCk34QhQORRxBeXXTA4= +golang.org/x/net v0.0.0-20181217023233-e147a9138326/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890 h1:uESlIz09WIHT2I+pasSXcpLYqYK8wHcdCetU3VuMBJE= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06 h1:0oC8rFnE+74kEmuHZ46F6KHsMr5Gx2gUQPuNz28iQZM= +golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text 
v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +google.golang.org/appengine v1.3.0 h1:FBSsiFRMz3LBeXIomRnVzrQwSDj4ibvcRexLG0LZGQk= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.41.0 h1:Ka3ViY6gNYSKiVy71zXBEqKplnV35ImDLVG+8uoIklE= +gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +k8s.io/api v0.0.0-20181130031204-d04500c8c3dd h1:5aHsneN62ehs/tdtS9tWZlhVk68V7yms/Qw7nsGmvCA= +k8s.io/api v0.0.0-20181130031204-d04500c8c3dd/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apimachinery v0.0.0-20181215012845-4d029f033399 h1:xdXaRQ7uNX4x6NpvxXASvlVXtKa8+WbCXK7Hjr6XZ6c= +k8s.io/apimachinery v0.0.0-20181215012845-4d029f033399/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/client-go v10.0.0+incompatible h1:F1IqCqw7oMBzDkqlcBymRq1450wD0eNqLE9jzUrIi34= +k8s.io/client-go v10.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/klog v0.1.0 h1:I5HMfc/DtuVaGR1KPwUrTc476K8NCqNBldC7H4dYEzk= +k8s.io/klog v0.1.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/kube-openapi v0.0.0-20181114233023-0317810137be h1:aWEq4nbj7HRJ0mtKYjNSk/7X28Tl6TI6FeG8gKF+r7Q= +k8s.io/kube-openapi v0.0.0-20181114233023-0317810137be/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/kilo.svg b/kilo.svg new file mode 100644 index 0000000..7db9c12 --- /dev/null +++ b/kilo.svg @@ -0,0 +1,2 @@ + + diff --git a/manifests/kilo-bootkube.yaml b/manifests/kilo-bootkube.yaml new file mode 100644 index 0000000..bb5ea69 --- /dev/null +++ b/manifests/kilo-bootkube.yaml @@ -0,0 +1,34 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: kilo + namespace: kube-system + labels: + app.kubernetes.io/name: kilo +spec: + template: + metadata: + labels: + app.kubernetes.io/name: kilo + spec: + hostNetwork: true + containers: + - name: kilo + image: squat/kilo + args: + - --kubeconfig=/etc/kubernetes/kubeconfig + securityContext: + 
privileged: true + volumeMounts: + - name: kubeconfig + mountPath: /etc/kubernetes/kubeconfig + readOnly: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: kubeconfig + hostPath: + path: /etc/kubernetes/kubeconfig diff --git a/manifests/kilo-kubeadm.yaml b/manifests/kilo-kubeadm.yaml new file mode 100644 index 0000000..3ea673b --- /dev/null +++ b/manifests/kilo-kubeadm.yaml @@ -0,0 +1,37 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: kilo + namespace: kube-system + labels: + app.kubernetes.io/name: kilo +spec: + template: + metadata: + labels: + app.kubernetes.io/name: kilo + spec: + hostNetwork: true + containers: + - name: kilo + image: squat/kilo + args: + - --kubeconfig=/etc/kubernetes/kubeconfig + securityContext: + privileged: true + volumeMounts: + - name: kubeconfig + mountPath: /etc/kubernetes + readOnly: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: kubeconfig + configMap: + name: kube-proxy + items: + - key: kubeconfig.conf + path: kubeconfig diff --git a/manifests/kilo-typhoon.yaml b/manifests/kilo-typhoon.yaml new file mode 100644 index 0000000..fa08270 --- /dev/null +++ b/manifests/kilo-typhoon.yaml @@ -0,0 +1,34 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: kilo + namespace: kube-system + labels: + app.kubernetes.io/name: kilo +spec: + template: + metadata: + labels: + app.kubernetes.io/name: kilo + spec: + hostNetwork: true + containers: + - name: kilo + image: squat/kilo + args: + - --kubeconfig=/etc/kubernetes/kubeconfig + securityContext: + privileged: true + volumeMounts: + - name: kubeconfig + mountPath: /etc/kubernetes + readOnly: true + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: kubeconfig + configMap: + name: kubeconfig-in-cluster diff --git a/pkg/iproute/ipip.go b/pkg/iproute/ipip.go new file mode 100644 index 0000000..1149c37 --- /dev/null +++ b/pkg/iproute/ipip.go @@ -0,0 +1,59 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iproute + +import ( + "bytes" + "fmt" + "os/exec" + + "github.com/vishvananda/netlink" +) + +const ( + ipipHeaderSize = 20 + tunnelName = "tunl0" +) + +// NewIPIP creates an IPIP interface using the base interface +// to derive the tunnel's MTU. +func NewIPIP(baseIndex int) (int, error) { + link, err := netlink.LinkByName(tunnelName) + if err != nil { + // If we failed to find the tunnel, then it probably simply does not exist. 
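+		// Create the tunl0 device by shelling out to `ip tunnel add`, then look it up again.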
+ cmd := exec.Command("ip", "tunnel", "add", tunnelName, "mode", "ipip") + var stderr bytes.Buffer + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + return 0, fmt.Errorf("failed to create IPIP tunnel: %s", stderr.String()) + } + link, err = netlink.LinkByName(tunnelName) + if err != nil { + return 0, fmt.Errorf("failed to get tunnel device: %v", err) + } + } + + base, err := netlink.LinkByIndex(baseIndex) + if err != nil { + return 0, fmt.Errorf("failed to get base device: %v", err) + } + + mtu := base.Attrs().MTU - ipipHeaderSize + if err = netlink.LinkSetMTU(link, mtu); err != nil { + return 0, fmt.Errorf("failed to set tunnel MTU: %v", err) + } + + return link.Attrs().Index, nil +} diff --git a/pkg/iproute/iproute.go b/pkg/iproute/iproute.go new file mode 100644 index 0000000..389bf39 --- /dev/null +++ b/pkg/iproute/iproute.go @@ -0,0 +1,70 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iproute + +import ( + "fmt" + "net" + + "github.com/vishvananda/netlink" +) + +// RemoveInterface removes an interface. +func RemoveInterface(index int) error { + link, err := netlink.LinkByIndex(index) + if err != nil { + return fmt.Errorf("failed to get link: %s", err) + } + return netlink.LinkDel(link) +} + +// Set sets the interface up or down. +func Set(index int, up bool) error { + link, err := netlink.LinkByIndex(index) + if err != nil { + return fmt.Errorf("failed to get link: %s", err) + } + if up { + return netlink.LinkSetUp(link) + } + return netlink.LinkSetDown(link) +} + +// SetAddress sets the IP address of an interface. +func SetAddress(index int, cidr *net.IPNet) error { + link, err := netlink.LinkByIndex(index) + if err != nil { + return fmt.Errorf("failed to get link: %s", err) + } + addrs, err := netlink.AddrList(link, netlink.FAMILY_ALL) + if err != nil { + return err + } + l := len(addrs) + for _, addr := range addrs { + if addr.IP.Equal(cidr.IP) && addr.Mask.String() == cidr.Mask.String() { + continue + } + if err := netlink.AddrDel(link, &addr); err != nil { + return fmt.Errorf("failed to delete address: %s", err) + } + l-- + } + // The only address left is the desired address, so quit. + if l == 1 { + return nil + } + return netlink.AddrReplace(link, &netlink.Addr{IPNet: cidr}) +} diff --git a/pkg/ipset/ipset.go b/pkg/ipset/ipset.go new file mode 100644 index 0000000..decb2ec --- /dev/null +++ b/pkg/ipset/ipset.go @@ -0,0 +1,199 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
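The iproute helpers above let the rest of Kilo manage links purely by interface index: NewIPIP creates (or reuses) the tunl0 device and derives its MTU from the base interface minus the 20-byte IPIP header, while Set and SetAddress toggle link state and reconcile addresses. The following is a minimal usage sketch, not part of this commit; the interface name "eth0" and the address are illustrative assumptions.

package main

import (
	"log"
	"net"

	"github.com/squat/kilo/pkg/iproute"
	"github.com/vishvananda/netlink"
)

func main() {
	// Look up the base interface whose MTU the tunnel should inherit.
	// "eth0" is only an example name.
	base, err := netlink.LinkByName("eth0")
	if err != nil {
		log.Fatalf("failed to find base interface: %v", err)
	}
	// Create (or reuse) the tunl0 device; its MTU becomes the base MTU
	// minus the 20-byte IPIP header.
	tunl, err := iproute.NewIPIP(base.Attrs().Index)
	if err != nil {
		log.Fatalf("failed to create IPIP tunnel: %v", err)
	}
	// Give the tunnel an address and bring the link up.
	_, cidr, _ := net.ParseCIDR("10.4.0.1/32")
	if err := iproute.SetAddress(tunl, cidr); err != nil {
		log.Fatalf("failed to set address: %v", err)
	}
	if err := iproute.Set(tunl, true); err != nil {
		log.Fatalf("failed to set tunnel up: %v", err)
	}
}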
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ipset + +import ( + "bytes" + "fmt" + "net" + "os/exec" + "sync" + "time" +) + +// Set represents an ipset. +// Set can safely be used concurrently. +type Set struct { + errors chan error + hosts map[string]struct{} + mu sync.Mutex + name string + subscribed bool + + // Make these functions fields to allow + // for testing. + add func(string) error + del func(string) error +} + +func setExists(name string) (bool, error) { + cmd := exec.Command("ipset", "list", "-n") + var stderr, stdout bytes.Buffer + cmd.Stderr = &stderr + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return false, fmt.Errorf("failed to check for set %s: %s", name, stderr.String()) + } + return bytes.Contains(stdout.Bytes(), []byte(name)), nil +} + +func hostInSet(set, name string) (bool, error) { + cmd := exec.Command("ipset", "list", set) + var stderr, stdout bytes.Buffer + cmd.Stderr = &stderr + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return false, fmt.Errorf("failed to check for host %s: %s", name, stderr.String()) + } + return bytes.Contains(stdout.Bytes(), []byte(name)), nil +} + +// New generates a new ipset. +func New(name string) *Set { + return &Set{ + errors: make(chan error), + hosts: make(map[string]struct{}), + name: name, + + add: func(ip string) error { + ok, err := hostInSet(name, ip) + if err != nil { + return err + } + if !ok { + cmd := exec.Command("ipset", "add", name, ip) + var stderr bytes.Buffer + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to add host %s to set %s: %s", ip, name, stderr.String()) + } + } + return nil + }, + del: func(ip string) error { + ok, err := hostInSet(name, ip) + if err != nil { + return err + } + if ok { + cmd := exec.Command("ipset", "del", name, ip) + var stderr bytes.Buffer + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to remove host %s from set %s: %s", ip, name, stderr.String()) + } + } + return nil + }, + } +} + +// Run watches for changes to the ipset and reconciles +// the ipset against the desired state. +func (s *Set) Run(stop <-chan struct{}) (<-chan error, error) { + s.mu.Lock() + if s.subscribed { + s.mu.Unlock() + return s.errors, nil + } + // Ensure a given instance only subscribes once. + s.subscribed = true + s.mu.Unlock() + go func() { + defer close(s.errors) + for { + select { + case <-time.After(2 * time.Second): + case <-stop: + return + } + ok, err := setExists(s.name) + if err != nil { + nonBlockingSend(s.errors, err) + } + // The set does not exist so wait and try again later. + if !ok { + continue + } + s.mu.Lock() + for h := range s.hosts { + if err := s.add(h); err != nil { + nonBlockingSend(s.errors, err) + } + } + s.mu.Unlock() + } + }() + return s.errors, nil +} + +// CleanUp will clean up any hosts added to the set. +func (s *Set) CleanUp() error { + s.mu.Lock() + defer s.mu.Unlock() + for h := range s.hosts { + if err := s.del(h); err != nil { + return err + } + delete(s.hosts, h) + } + return nil +} + +// Set idempotently overwrites any hosts previously defined +// for the ipset with the given hosts. 
+func (s *Set) Set(hosts []net.IP) error { + h := make(map[string]struct{}) + for _, host := range hosts { + if host == nil { + continue + } + h[host.String()] = struct{}{} + } + exists, err := setExists(s.name) + if err != nil { + return err + } + s.mu.Lock() + defer s.mu.Unlock() + for k := range s.hosts { + if _, ok := h[k]; !ok { + if exists { + if err := s.del(k); err != nil { + return err + } + } + delete(s.hosts, k) + } + } + for k := range h { + if _, ok := s.hosts[k]; !ok { + if exists { + if err := s.add(k); err != nil { + return err + } + } + s.hosts[k] = struct{}{} + } + } + return nil +} + +func nonBlockingSend(errors chan<- error, err error) { + select { + case errors <- err: + default: + } +} diff --git a/pkg/iptables/fake.go b/pkg/iptables/fake.go new file mode 100644 index 0000000..6f8a86a --- /dev/null +++ b/pkg/iptables/fake.go @@ -0,0 +1,92 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "fmt" + "strings" + + "github.com/coreos/go-iptables/iptables" +) + +type statusExiter interface { + ExitStatus() int +} + +var _ statusExiter = (*iptables.Error)(nil) +var _ statusExiter = statusError(0) + +type statusError int + +func (s statusError) Error() string { + return fmt.Sprintf("%d", s) +} + +func (s statusError) ExitStatus() int { + return int(s) +} + +type fakeClient map[string]Rule + +var _ iptablesClient = fakeClient(nil) + +func (f fakeClient) AppendUnique(table, chain string, spec ...string) error { + r := &rule{table, chain, spec, nil} + f[r.String()] = r + return nil +} + +func (f fakeClient) Delete(table, chain string, spec ...string) error { + r := &rule{table, chain, spec, nil} + delete(f, r.String()) + return nil +} + +func (f fakeClient) Exists(table, chain string, spec ...string) (bool, error) { + r := &rule{table, chain, spec, nil} + _, ok := f[r.String()] + return ok, nil +} + +func (f fakeClient) ClearChain(table, name string) error { + c := &chain{table, name, nil} + for k := range f { + if strings.HasPrefix(k, c.String()) { + delete(f, k) + } + } + f[c.String()] = c + return nil +} + +func (f fakeClient) DeleteChain(table, name string) error { + c := &chain{table, name, nil} + for k := range f { + if strings.HasPrefix(k, c.String()) { + return fmt.Errorf("cannot delete chain %s; rules exist", name) + } + } + delete(f, c.String()) + return nil +} + +func (f fakeClient) NewChain(table, name string) error { + c := &chain{table, name, nil} + if _, ok := f[c.String()]; ok { + return statusError(1) + } + f[c.String()] = c + return nil +} diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go new file mode 100644 index 0000000..e67d8e0 --- /dev/null +++ b/pkg/iptables/iptables.go @@ -0,0 +1,289 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
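The ipset Set type above tracks a desired membership and re-adds missing hosts every two seconds, tolerating the case where the underlying ipset does not exist yet. A hedged usage sketch follows; the set name mirrors the Calico set referenced later in mesh.New, and the host IPs are examples.

package main

import (
	"log"
	"net"

	"github.com/squat/kilo/pkg/ipset"
)

func main() {
	stop := make(chan struct{})
	s := ipset.New("cali40all-hosts-net")
	// Start the reconciliation loop; it retries every two seconds
	// until the ipset exists and contains the desired hosts.
	errs, err := s.Run(stop)
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		for e := range errs {
			log.Println("ipset error:", e)
		}
	}()
	// Declare the desired membership; Set is idempotent and removes
	// hosts that are no longer wanted.
	hosts := []net.IP{net.ParseIP("10.1.0.1"), net.ParseIP("10.1.0.2")}
	if err := s.Set(hosts); err != nil {
		log.Println(err)
	}
	// Later, on shutdown, remove everything that was added.
	if err := s.CleanUp(); err != nil {
		log.Println(err)
	}
	close(stop)
}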
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "fmt" + "net" + "strings" + "sync" + "time" + + "github.com/coreos/go-iptables/iptables" +) + +type iptablesClient interface { + AppendUnique(string, string, ...string) error + Delete(string, string, ...string) error + Exists(string, string, ...string) (bool, error) + ClearChain(string, string) error + DeleteChain(string, string) error + NewChain(string, string) error +} + +// rule represents an iptables rule. +type rule struct { + table string + chain string + spec []string + client iptablesClient +} + +func (r *rule) Add() error { + if err := r.client.AppendUnique(r.table, r.chain, r.spec...); err != nil { + return fmt.Errorf("failed to add iptables rule: %v", err) + } + return nil +} + +func (r *rule) Delete() error { + // Ignore the returned error as an error likely means + // that the rule doesn't exist, which is fine. + r.client.Delete(r.table, r.chain, r.spec...) + return nil +} + +func (r *rule) Exists() (bool, error) { + return r.client.Exists(r.table, r.chain, r.spec...) +} + +func (r *rule) String() string { + if r == nil { + return "" + } + return fmt.Sprintf("%s_%s_%s", r.table, r.chain, strings.Join(r.spec, "_")) +} + +// chain represents an iptables chain. +type chain struct { + table string + chain string + client iptablesClient +} + +func (c *chain) Add() error { + if err := c.client.ClearChain(c.table, c.chain); err != nil { + return fmt.Errorf("failed to add iptables chain: %v", err) + } + return nil +} + +func (c *chain) Delete() error { + // The chain must be empty before it can be deleted. + if err := c.client.ClearChain(c.table, c.chain); err != nil { + return fmt.Errorf("failed to clear iptables chain: %v", err) + } + // Ignore the returned error as an error likely means + // that the chain doesn't exist, which is fine. + c.client.DeleteChain(c.table, c.chain) + return nil +} + +func (c *chain) Exists() (bool, error) { + // The code for "chain already exists". + existsErr := 1 + err := c.client.NewChain(c.table, c.chain) + se, ok := err.(statusExiter) + switch { + case err == nil: + // If there was no error adding a new chain, then it did not exist. + // Delete it and return false. + c.client.DeleteChain(c.table, c.chain) + return false, nil + case ok && se.ExitStatus() == existsErr: + return true, nil + default: + return false, err + } +} + +func (c *chain) String() string { + if c == nil { + return "" + } + return fmt.Sprintf("%s_%s", c.table, c.chain) +} + +// Rule is an interface for interacting with iptables objects. +type Rule interface { + Add() error + Delete() error + Exists() (bool, error) + String() string +} + +// Controller is able to reconcile a given set of iptables rules. +type Controller struct { + client iptablesClient + errors chan error + rules map[string]Rule + mu sync.Mutex + subscribed bool +} + +// New generates a new iptables rules controller. +// It expects an IP address length to determine +// whether to operate in IPv4 or IPv6 mode. 
+func New(ipLength int) (*Controller, error) { + p := iptables.ProtocolIPv4 + if ipLength == net.IPv6len { + p = iptables.ProtocolIPv6 + } + client, err := iptables.NewWithProtocol(p) + if err != nil { + return nil, fmt.Errorf("failed to create iptables client: %v", err) + } + return &Controller{ + client: client, + errors: make(chan error), + rules: make(map[string]Rule), + }, nil +} + +// Run watches for changes to iptables rules and reconciles +// the rules against the desired state. +func (c *Controller) Run(stop <-chan struct{}) (<-chan error, error) { + c.mu.Lock() + if c.subscribed { + c.mu.Unlock() + return c.errors, nil + } + // Ensure a given instance only subscribes once. + c.subscribed = true + c.mu.Unlock() + go func() { + defer close(c.errors) + for { + select { + case <-time.After(5 * time.Second): + case <-stop: + return + } + c.mu.Lock() + for _, r := range c.rules { + ok, err := r.Exists() + if err != nil { + nonBlockingSend(c.errors, fmt.Errorf("failed to check if rule exists: %v", err)) + } + if !ok { + if err := r.Add(); err != nil { + nonBlockingSend(c.errors, fmt.Errorf("failed to add rule: %v", err)) + } + } + } + c.mu.Unlock() + } + }() + return c.errors, nil +} + +// Set idempotently overwrites any iptables rules previously defined +// for the controller with the given set of rules. +func (c *Controller) Set(rules []Rule) error { + r := make(map[string]struct{}) + for i := range rules { + if rules[i] == nil { + continue + } + switch v := rules[i].(type) { + case *rule: + v.client = c.client + case *chain: + v.client = c.client + } + r[rules[i].String()] = struct{}{} + } + c.mu.Lock() + defer c.mu.Unlock() + for k, rule := range c.rules { + if _, ok := r[k]; !ok { + if err := rule.Delete(); err != nil { + return fmt.Errorf("failed to delete rule: %v", err) + } + delete(c.rules, k) + } + } + // Iterate over the slice rather than the map + // to ensure the rules are added in order. + for _, rule := range rules { + if _, ok := c.rules[rule.String()]; !ok { + if err := rule.Add(); err != nil { + return fmt.Errorf("failed to add rule: %v", err) + } + c.rules[rule.String()] = rule + } + } + return nil +} + +// CleanUp will clean up any rules created by the controller. +func (c *Controller) CleanUp() error { + c.mu.Lock() + defer c.mu.Unlock() + for k, rule := range c.rules { + if err := rule.Delete(); err != nil { + return fmt.Errorf("failed to delete rule: %v", err) + } + delete(c.rules, k) + } + return nil +} + +// EncapsulateRules returns a set of iptables rules that are necessary +// when traffic between nodes must be encapsulated. +func EncapsulateRules(nodes []*net.IPNet) []Rule { + var rules []Rule + for _, n := range nodes { + // Accept encapsulated traffic from peers. + rules = append(rules, &rule{"filter", "INPUT", []string{"-m", "comment", "--comment", "Kilo: allow IPIP traffic", "-s", n.IP.String(), "-p", "4", "-j", "ACCEPT"}, nil}) + } + return rules +} + +// ForwardRules returns a set of iptables rules that are necessary +// when traffic must be forwarded for the overlay. +func ForwardRules(subnet *net.IPNet) []Rule { + s := subnet.String() + return []Rule{ + // Forward traffic to and from the overlay. + &rule{"filter", "FORWARD", []string{"-s", s, "-j", "ACCEPT"}, nil}, + &rule{"filter", "FORWARD", []string{"-d", s, "-j", "ACCEPT"}, nil}, + } +} + +// MasqueradeRules returns a set of iptables rules that are necessary +// when traffic must be masqueraded for Kilo. 
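The Controller above re-applies its desired rule set every five seconds and removes only the rules it owns on CleanUp. A minimal wiring sketch using the rule constructors defined above; the subnets are illustrative and error handling is abbreviated.

package main

import (
	"log"
	"net"

	"github.com/squat/kilo/pkg/iptables"
)

func main() {
	// IPv4 mode; pass net.IPv6len to manage ip6tables instead.
	controller, err := iptables.New(net.IPv4len)
	if err != nil {
		log.Fatal(err)
	}
	stop := make(chan struct{})
	errs, err := controller.Run(stop)
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		for e := range errs {
			log.Println("iptables error:", e)
		}
	}()
	// Build the desired state: forward the Kilo subnet and accept
	// IPIP traffic from one peer node.
	_, subnet, _ := net.ParseCIDR("10.4.0.0/16")
	_, peer, _ := net.ParseCIDR("192.0.2.10/32")
	rules := iptables.ForwardRules(subnet)
	rules = append(rules, iptables.EncapsulateRules([]*net.IPNet{peer})...)
	if err := controller.Set(rules); err != nil {
		log.Println(err)
	}
	// On shutdown, remove everything the controller added.
	defer controller.CleanUp()
	close(stop)
}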
+func MasqueradeRules(subnet, localPodSubnet *net.IPNet, remotePodSubnet []*net.IPNet) []Rule { + var rules []Rule + rules = append(rules, &chain{"mangle", "KILO-MARK", nil}) + rules = append(rules, &rule{"mangle", "PREROUTING", []string{"-m", "comment", "--comment", "Kilo: jump to mark chain", "-i", "kilo+", "-j", "KILO-MARK"}, nil}) + rules = append(rules, &rule{"mangle", "KILO-MARK", []string{"-m", "comment", "--comment", "Kilo: do not mark packets destined for the local Pod subnet", "-d", localPodSubnet.String(), "-j", "RETURN"}, nil}) + if subnet != nil { + rules = append(rules, &rule{"mangle", "KILO-MARK", []string{"-m", "comment", "--comment", "Kilo: do not mark packets destined for the local private subnet", "-d", subnet.String(), "-j", "RETURN"}, nil}) + } + rules = append(rules, &rule{"mangle", "KILO-MARK", []string{"-m", "comment", "--comment", "Kilo: remaining packets should be marked for NAT", "-j", "MARK", "--set-xmark", "0x1107/0x1107"}, nil}) + rules = append(rules, &rule{"nat", "POSTROUTING", []string{"-m", "comment", "--comment", "Kilo: NAT packets from Kilo interface", "-m", "mark", "--mark", "0x1107/0x1107", "-j", "MASQUERADE"}, nil}) + for _, r := range remotePodSubnet { + rules = append(rules, &rule{"nat", "POSTROUTING", []string{"-m", "comment", "--comment", "Kilo: NAT packets from local pod subnet to remote pod subnets", "-s", localPodSubnet.String(), "-d", r.String(), "-j", "MASQUERADE"}, nil}) + } + return rules +} + +func nonBlockingSend(errors chan<- error, err error) { + select { + case errors <- err: + default: + } +} diff --git a/pkg/iptables/iptables_test.go b/pkg/iptables/iptables_test.go new file mode 100644 index 0000000..f30151d --- /dev/null +++ b/pkg/iptables/iptables_test.go @@ -0,0 +1,101 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
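MasqueradeRules above marks packets entering through a kilo interface in the mangle table, exempts the local pod subnet and, when given, the local private subnet, and then masquerades marked packets in POSTROUTING along with traffic from the local pod subnet to each remote pod subnet. The small sketch below only prints the keys of the generated rules to show the shape of the rule set; the subnets are example values.

package main

import (
	"fmt"
	"net"

	"github.com/squat/kilo/pkg/iptables"
)

func main() {
	_, private, _ := net.ParseCIDR("10.0.0.0/24")    // local private subnet
	_, localPods, _ := net.ParseCIDR("10.2.1.0/24")  // local pod subnet
	_, remotePods, _ := net.ParseCIDR("10.2.2.0/24") // a remote pod subnet
	rules := iptables.MasqueradeRules(private, localPods, []*net.IPNet{remotePods})
	// Each element stringifies to the key the Controller uses to track it:
	// the table, chain, and any spec joined by underscores.
	for _, r := range rules {
		fmt.Println(r.String())
	}
}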
+ +package iptables + +import ( + "testing" +) + +var rules = []Rule{ + &rule{"filter", "FORWARD", []string{"-s", "10.4.0.0/16", "-j", "ACCEPT"}, nil}, + &rule{"filter", "FORWARD", []string{"-d", "10.4.0.0/16", "-j", "ACCEPT"}, nil}, +} + +func newController() *Controller { + return &Controller{ + rules: make(map[string]Rule), + } +} + +func TestSet(t *testing.T) { + for _, tc := range []struct { + name string + rules []Rule + }{ + { + name: "empty", + rules: nil, + }, + { + name: "single", + rules: []Rule{rules[0]}, + }, + { + name: "multiple", + rules: []Rule{rules[0], rules[1]}, + }, + } { + backend := make(map[string]Rule) + controller := newController() + controller.client = fakeClient(backend) + if err := controller.Set(tc.rules); err != nil { + t.Fatalf("test case %q: got unexpected error: %v", tc.name, err) + } + for _, r := range tc.rules { + r1 := backend[r.String()] + r2 := controller.rules[r.String()] + if r.String() != r1.String() || r.String() != r2.String() { + t.Errorf("test case %q: expected all rules to be equal: expected %v, got %v and %v", tc.name, r, r1, r2) + } + } + } +} + +func TestCleanUp(t *testing.T) { + for _, tc := range []struct { + name string + rules []Rule + }{ + { + name: "empty", + rules: nil, + }, + { + name: "single", + rules: []Rule{rules[0]}, + }, + { + name: "multiple", + rules: []Rule{rules[0], rules[1]}, + }, + } { + backend := make(map[string]Rule) + controller := newController() + controller.client = fakeClient(backend) + if err := controller.Set(tc.rules); err != nil { + t.Fatalf("test case %q: Set should not fail: %v", tc.name, err) + } + if err := controller.CleanUp(); err != nil { + t.Errorf("test case %q: got unexpected error: %v", tc.name, err) + } + for _, r := range tc.rules { + r1 := backend[r.String()] + r2 := controller.rules[r.String()] + if r1 != nil || r2 != nil { + t.Errorf("test case %q: expected all rules to be nil: expected got %v and %v", tc.name, r1, r2) + } + } + } +} diff --git a/pkg/k8s/backend.go b/pkg/k8s/backend.go new file mode 100644 index 0000000..b458896 --- /dev/null +++ b/pkg/k8s/backend.go @@ -0,0 +1,229 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package k8s + +import ( + "encoding/json" + "errors" + "fmt" + "net" + "path" + "strings" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + v1informers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/kubernetes" + v1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + + "github.com/squat/kilo/pkg/mesh" +) + +const ( + // Backend is the name of this mesh backend. 
+ Backend = "kubernetes" + externalIPAnnotationKey = "kilo.squat.ai/external-ip" + forceExternalIPAnnotationKey = "kilo.squat.ai/force-external-ip" + internalIPAnnotationKey = "kilo.squat.ai/internal-ip" + keyAnnotationKey = "kilo.squat.ai/key" + leaderAnnotationKey = "kilo.squat.ai/leader" + locationAnnotationKey = "kilo.squat.ai/location" + regionLabelKey = "failure-domain.beta.kubernetes.io/region" + jsonPatchSlash = "~1" + jsonRemovePatch = `{"op": "remove", "path": "%s"}` +) + +type backend struct { + client kubernetes.Interface + events chan *mesh.Event + informer cache.SharedIndexInformer + lister v1listers.NodeLister +} + +// New creates a new instance of a mesh.Backend. +func New(client kubernetes.Interface) mesh.Backend { + informer := v1informers.NewNodeInformer(client, 5*time.Minute, nil) + + b := &backend{ + client: client, + events: make(chan *mesh.Event), + informer: informer, + lister: v1listers.NewNodeLister(informer.GetIndexer()), + } + + return b +} + +// CleanUp removes configuration applied to the backend. +func (b *backend) CleanUp(name string) error { + patch := []byte("[" + strings.Join([]string{ + fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(externalIPAnnotationKey, "/", jsonPatchSlash, 1))), + fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(internalIPAnnotationKey, "/", jsonPatchSlash, 1))), + fmt.Sprintf(jsonRemovePatch, path.Join("/metadata", "annotations", strings.Replace(keyAnnotationKey, "/", jsonPatchSlash, 1))), + }, ",") + "]") + if _, err := b.client.CoreV1().Nodes().Patch(name, types.JSONPatchType, patch); err != nil { + return fmt.Errorf("failed to patch node: %v", err) + } + return nil +} + +// Get gets a single Node by name. +func (b *backend) Get(name string) (*mesh.Node, error) { + n, err := b.lister.Get(name) + if err != nil { + return nil, err + } + return translateNode(n), nil +} + +// Init initializes the backend; for this backend that means +// syncing the informer cache. +func (b *backend) Init(stop <-chan struct{}) error { + go b.informer.Run(stop) + if ok := cache.WaitForCacheSync(stop, func() bool { + return b.informer.HasSynced() + }); !ok { + return errors.New("failed to start sync node cache") + } + b.informer.AddEventHandler( + cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + n, ok := obj.(*v1.Node) + if !ok { + // Failed to decode Node; ignoring... + return + } + b.events <- &mesh.Event{Type: mesh.AddEvent, Node: translateNode(n)} + }, + UpdateFunc: func(_, obj interface{}) { + n, ok := obj.(*v1.Node) + if !ok { + // Failed to decode Node; ignoring... + return + } + b.events <- &mesh.Event{Type: mesh.UpdateEvent, Node: translateNode(n)} + }, + DeleteFunc: func(obj interface{}) { + n, ok := obj.(*v1.Node) + if !ok { + // Failed to decode Node; ignoring... + return + } + b.events <- &mesh.Event{Type: mesh.DeleteEvent, Node: translateNode(n)} + }, + }, + ) + return nil +} + +// List gets all the Nodes in the cluster. +func (b *backend) List() ([]*mesh.Node, error) { + ns, err := b.lister.List(labels.Everything()) + if err != nil { + return nil, err + } + nodes := make([]*mesh.Node, len(ns)) + for i := range ns { + nodes[i] = translateNode(ns[i]) + } + return nodes, nil +} + +// Set sets the fields of a node. 
+func (b *backend) Set(name string, node *mesh.Node) error { + old, err := b.lister.Get(name) + if err != nil { + return fmt.Errorf("failed to find node: %v", err) + } + n := old.DeepCopy() + n.ObjectMeta.Annotations[externalIPAnnotationKey] = node.ExternalIP.String() + n.ObjectMeta.Annotations[internalIPAnnotationKey] = node.InternalIP.String() + n.ObjectMeta.Annotations[keyAnnotationKey] = string(node.Key) + oldData, err := json.Marshal(old) + if err != nil { + return err + } + newData, err := json.Marshal(n) + if err != nil { + return err + } + patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) + if err != nil { + return fmt.Errorf("failed to create patch for node %q: %v", n.Name, err) + } + if _, err = b.client.CoreV1().Nodes().Patch(name, types.StrategicMergePatchType, patch); err != nil { + return fmt.Errorf("failed to patch node: %v", err) + } + return nil +} + +// Watch returns a chan of node events. +func (b *backend) Watch() <-chan *mesh.Event { + return b.events +} + +// translateNode translates a Kubernetes Node to a mesh.Node. +func translateNode(node *v1.Node) *mesh.Node { + if node == nil { + return nil + } + _, subnet, err := net.ParseCIDR(node.Spec.PodCIDR) + // The subnet should only ever fail to parse if the pod CIDR has not been set, + // so in this case set the subnet to nil and let the node be updated. + if err != nil { + subnet = nil + } + _, leader := node.ObjectMeta.Annotations[leaderAnnotationKey] + // Allow the region to be overridden by an explicit location. + location, ok := node.ObjectMeta.Annotations[locationAnnotationKey] + if !ok { + location = node.ObjectMeta.Labels[regionLabelKey] + } + // Allow the external IP to be overridden. + externalIP, ok := node.ObjectMeta.Annotations[forceExternalIPAnnotationKey] + if !ok { + externalIP = node.ObjectMeta.Annotations[externalIPAnnotationKey] + } + return &mesh.Node{ + // ExternalIP and InternalIP should only ever fail to parse if the + // remote node's mesh has not yet set its IP address; + // in this case the IP will be nil and + // the mesh can wait for the node to be updated. + ExternalIP: normalizeIP(externalIP), + InternalIP: normalizeIP(node.ObjectMeta.Annotations[internalIPAnnotationKey]), + Key: []byte(node.ObjectMeta.Annotations[keyAnnotationKey]), + Leader: leader, + Location: location, + Name: node.Name, + Subnet: subnet, + } +} + +func normalizeIP(ip string) *net.IPNet { + i, ipNet, _ := net.ParseCIDR(ip) + if ipNet == nil { + return ipNet + } + if ip4 := i.To4(); ip4 != nil { + ipNet.IP = ip4 + return ipNet + } + ipNet.IP = i.To16() + return ipNet +} diff --git a/pkg/k8s/backend_test.go b/pkg/k8s/backend_test.go new file mode 100644 index 0000000..9f4a323 --- /dev/null +++ b/pkg/k8s/backend_test.go @@ -0,0 +1,145 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
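Set above serializes the cached Node and a mutated copy and lets the API machinery compute a strategic merge patch, so only the annotations Kilo owns are sent to the API server. A hedged sketch of the same technique applied to a bare Node object; the annotation value is an example and the exact JSON ordering of the resulting patch may differ.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	old := &v1.Node{}
	old.ObjectMeta.Name = "node-1"
	old.ObjectMeta.Annotations = map[string]string{"existing": "value"}

	updated := old.DeepCopy()
	updated.ObjectMeta.Annotations["kilo.squat.ai/key"] = "example-public-key"

	oldData, err := json.Marshal(old)
	if err != nil {
		log.Fatal(err)
	}
	newData, err := json.Marshal(updated)
	if err != nil {
		log.Fatal(err)
	}
	// The patch contains only the annotation delta, e.g.
	// {"metadata":{"annotations":{"kilo.squat.ai/key":"example-public-key"}}}.
	patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(patch))
}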
+ +package k8s + +import ( + "net" + "testing" + + "github.com/kylelemons/godebug/pretty" + "k8s.io/api/core/v1" + + "github.com/squat/kilo/pkg/mesh" +) + +func TestTranslateNode(t *testing.T) { + for _, tc := range []struct { + name string + annotations map[string]string + labels map[string]string + out *mesh.Node + subnet string + }{ + { + name: "empty", + annotations: nil, + out: &mesh.Node{}, + }, + { + name: "invalid ip", + annotations: map[string]string{ + externalIPAnnotationKey: "10.0.0.1", + internalIPAnnotationKey: "10.0.0.1", + }, + out: &mesh.Node{}, + }, + { + name: "valid ip", + annotations: map[string]string{ + externalIPAnnotationKey: "10.0.0.1/24", + internalIPAnnotationKey: "10.0.0.2/32", + }, + out: &mesh.Node{ + ExternalIP: &net.IPNet{IP: net.ParseIP("10.0.0.1"), Mask: net.CIDRMask(24, 32)}, + InternalIP: &net.IPNet{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(32, 32)}, + }, + }, + { + name: "invalid subnet", + annotations: map[string]string{}, + out: &mesh.Node{}, + subnet: "foo", + }, + { + name: "normalize subnet", + annotations: map[string]string{}, + out: &mesh.Node{ + Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(24, 32)}, + }, + subnet: "10.2.0.1/24", + }, + { + name: "valid subnet", + annotations: map[string]string{}, + out: &mesh.Node{ + Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)}, + }, + subnet: "10.2.1.0/24", + }, + { + name: "region", + labels: map[string]string{ + regionLabelKey: "a", + }, + out: &mesh.Node{ + Location: "a", + }, + }, + { + name: "region override", + annotations: map[string]string{ + locationAnnotationKey: "b", + }, + labels: map[string]string{ + regionLabelKey: "a", + }, + out: &mesh.Node{ + Location: "b", + }, + }, + { + name: "external IP override", + annotations: map[string]string{ + externalIPAnnotationKey: "10.0.0.1/24", + forceExternalIPAnnotationKey: "10.0.0.2/24", + }, + out: &mesh.Node{ + ExternalIP: &net.IPNet{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)}, + }, + }, + { + name: "complete", + annotations: map[string]string{ + externalIPAnnotationKey: "10.0.0.1/24", + forceExternalIPAnnotationKey: "10.0.0.2/24", + internalIPAnnotationKey: "10.0.0.2/32", + keyAnnotationKey: "foo", + leaderAnnotationKey: "", + locationAnnotationKey: "b", + }, + labels: map[string]string{ + regionLabelKey: "a", + }, + out: &mesh.Node{ + ExternalIP: &net.IPNet{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(24, 32)}, + InternalIP: &net.IPNet{IP: net.ParseIP("10.0.0.2"), Mask: net.CIDRMask(32, 32)}, + Key: []byte("foo"), + Leader: true, + Location: "b", + Subnet: &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)}, + }, + subnet: "10.2.1.0/24", + }, + } { + n := &v1.Node{} + n.ObjectMeta.Annotations = tc.annotations + n.ObjectMeta.Labels = tc.labels + n.Spec.PodCIDR = tc.subnet + node := translateNode(n) + if diff := pretty.Compare(node, tc.out); diff != "" { + t.Errorf("test case %q: got diff: %v", tc.name, diff) + } + } +} diff --git a/pkg/mesh/graph.go b/pkg/mesh/graph.go new file mode 100644 index 0000000..35161dc --- /dev/null +++ b/pkg/mesh/graph.go @@ -0,0 +1,101 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mesh + +import ( + "fmt" + "net" + + "github.com/awalterschulze/gographviz" +) + +// Dot generates a Graphviz graph of the Topology in DOT fomat. +func (t *Topology) Dot() (string, error) { + g := gographviz.NewGraph() + g.Name = "kilo" + if err := g.AddAttr("kilo", string(gographviz.Label), graphEscape(t.subnet.String())); err != nil { + return "", fmt.Errorf("failed to add label to graph") + } + if err := g.AddAttr("kilo", string(gographviz.LabelLOC), "t"); err != nil { + return "", fmt.Errorf("failed to add label location to graph") + } + if err := g.AddAttr("kilo", string(gographviz.Overlap), "false"); err != nil { + return "", fmt.Errorf("failed to disable graph overlap") + } + if err := g.SetDir(true); err != nil { + return "", fmt.Errorf("failed to set direction") + } + leaders := make([]string, len(t.Segments)) + nodeAttrs := map[string]string{ + string(gographviz.Shape): "ellipse", + } + for i, s := range t.Segments { + if err := g.AddSubGraph("kilo", subGraphName(s.Location), nil); err != nil { + return "", fmt.Errorf("failed to add subgraph") + } + if err := g.AddAttr(subGraphName(s.Location), string(gographviz.Label), graphEscape(s.Location)); err != nil { + return "", fmt.Errorf("failed to add label to subgraph") + } + if err := g.AddAttr(subGraphName(s.Location), string(gographviz.Style), `"dashed,rounded"`); err != nil { + return "", fmt.Errorf("failed to add style to subgraph") + } + for j := range s.cidrs { + if err := g.AddNode(subGraphName(s.Location), graphEscape(s.hostnames[j]), nodeAttrs); err != nil { + return "", fmt.Errorf("failed to add node to subgraph") + } + var wg net.IP + if j == s.leader { + wg = s.wireGuardIP + if err := g.Nodes.Lookup[graphEscape(s.hostnames[j])].Attrs.Add(string(gographviz.Rank), "1"); err != nil { + return "", fmt.Errorf("failed to add rank to node") + } + } + if err := g.Nodes.Lookup[graphEscape(s.hostnames[j])].Attrs.Add(string(gographviz.Label), nodeLabel(s.Location, s.hostnames[j], s.cidrs[j], s.privateIPs[j], wg)); err != nil { + return "", fmt.Errorf("failed to add label to node") + } + } + meshSubGraph(g, g.Relations.SortedChildren(subGraphName(s.Location)), s.leader) + leaders[i] = graphEscape(s.hostnames[s.leader]) + } + meshSubGraph(g, leaders, 0) + return g.String(), nil +} + +func meshSubGraph(g *gographviz.Graph, nodes []string, leader int) { + for i := range nodes { + if i == leader { + continue + } + a := make(gographviz.Attrs) + a[gographviz.Dir] = "both" + g.Edges.Add(&gographviz.Edge{Src: nodes[leader], Dst: nodes[i], Dir: true, Attrs: a}) + } +} + +func graphEscape(s string) string { + return fmt.Sprintf("\"%s\"", s) +} + +func subGraphName(name string) string { + return graphEscape(fmt.Sprintf("cluster_%s", name)) +} + +func nodeLabel(location, name string, cidr *net.IPNet, priv, wgIP net.IP) string { + var wg string + if wgIP != nil { + wg = wgIP.String() + } + return graphEscape(fmt.Sprintf("%s\n%s\n%s\n%s\n%s", location, name, cidr.String(), priv.String(), wg)) +} diff --git a/pkg/mesh/ip.go b/pkg/mesh/ip.go new file mode 100644 index 0000000..3cb5bf2 --- /dev/null +++ b/pkg/mesh/ip.go 
@@ -0,0 +1,348 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mesh + +import ( + "errors" + "fmt" + "net" + "sort" + + "github.com/vishvananda/netlink" +) + +// getIP returns a private and public IP address for the local node. +// It selects the private IP address in the following order: +// - private IP to which hostname resolves +// - private IP assigned to interface of default route +// - private IP assigned to local interface +// - public IP to which hostname resolves +// - public IP assigned to interface of default route +// - public IP assigned to local interface +// It selects the public IP address in the following order: +// - public IP to which hostname resolves +// - public IP assigned to interface of default route +// - public IP assigned to local interface +// - private IP to which hostname resolves +// - private IP assigned to interface of default route +// - private IP assigned to local interface +// - if no IP was found, return nil and an error. +func getIP(hostname string) (*net.IPNet, *net.IPNet, error) { + var hostPriv, hostPub []*net.IPNet + { + // Check IPs to which hostname resolves first. + ips, err := ipsForHostname(hostname) + if err != nil { + return nil, nil, err + } + for _, ip := range ips { + ok, mask, err := assignedToInterface(ip) + if err != nil { + return nil, nil, fmt.Errorf("failed to search locally assigned addresses: %v", err) + } + if !ok { + continue + } + ip.Mask = mask + if isPublic(ip) { + hostPub = append(hostPub, ip) + continue + } + hostPriv = append(hostPriv, ip) + } + sortIPs(hostPriv) + sortIPs(hostPub) + } + + var defaultPriv, defaultPub []*net.IPNet + { + // Check IPs on interface for default route next. + iface, err := defaultInterface() + if err != nil { + return nil, nil, err + } + ips, err := ipsForInterface(iface) + if err != nil { + return nil, nil, err + } + for _, ip := range ips { + if isLocal(ip.IP) { + continue + } + if isPublic(ip) { + defaultPub = append(defaultPub, ip) + continue + } + defaultPriv = append(defaultPriv, ip) + } + sortIPs(defaultPriv) + sortIPs(defaultPub) + } + + var interfacePriv, interfacePub []*net.IPNet + { + // Finally look for IPs on all interfaces. + ips, err := ipsForAllInterfaces() + if err != nil { + return nil, nil, err + } + for _, ip := range ips { + if isLocal(ip.IP) { + continue + } + if isPublic(ip) { + interfacePub = append(interfacePub, ip) + continue + } + interfacePriv = append(interfacePriv, ip) + } + sortIPs(interfacePriv) + sortIPs(interfacePub) + } + + var priv, pub []*net.IPNet + priv = append(priv, hostPriv...) + priv = append(priv, defaultPriv...) + priv = append(priv, interfacePriv...) + pub = append(pub, hostPub...) + pub = append(pub, defaultPub...) + pub = append(pub, interfacePub...) 
+ if len(priv) == 0 && len(pub) == 0 { + return nil, nil, errors.New("no valid IP was found") + } + if len(priv) == 0 { + priv = pub + } + if len(pub) == 0 { + pub = priv + } + return priv[0], pub[0], nil +} + +// sortIPs sorts IPs so the result is stable. +// It will first sort IPs by type, to prefer selecting +// IPs of the same type, and then by value. +func sortIPs(ips []*net.IPNet) { + sort.Slice(ips, func(i, j int) bool { + i4, j4 := ips[i].IP.To4(), ips[j].IP.To4() + if i4 != nil && j4 == nil { + return true + } + if j4 != nil && i4 == nil { + return false + } + return ips[i].String() < ips[j].String() + }) +} + +func assignedToInterface(ip *net.IPNet) (bool, net.IPMask, error) { + links, err := netlink.LinkList() + if err != nil { + return false, nil, fmt.Errorf("failed to list interfaces: %v", err) + } + // Sort the links for stability. + sort.Slice(links, func(i, j int) bool { + return links[i].Attrs().Name < links[j].Attrs().Name + }) + for _, link := range links { + addrs, err := netlink.AddrList(link, netlink.FAMILY_ALL) + if err != nil { + return false, nil, fmt.Errorf("failed to list addresses for %s: %v", link.Attrs().Name, err) + } + // Sort the IPs for stability. + sort.Slice(addrs, func(i, j int) bool { + return addrs[i].String() < addrs[j].String() + }) + for i := range addrs { + if ip.IP.Equal(addrs[i].IP) { + return true, addrs[i].Mask, nil + } + } + } + return false, nil, nil +} + +func isLocal(ip net.IP) bool { + return ip.IsLoopback() || ip.IsLinkLocalMulticast() || ip.IsLinkLocalUnicast() +} + +func isPublic(ip *net.IPNet) bool { + // Check RFC 1918 addresses. + if ip4 := ip.IP.To4(); ip4 != nil { + switch true { + // Check for 10.0.0.0/8. + case ip4[0] == 10: + return false + // Check for 172.16.0.0/12. + case ip4[0] == 172 && ip4[1]&0xf0 == 0x01: + return false + // Check for 192.168.0.0/16. + case ip4[0] == 192 && ip4[1] == 168: + return false + default: + return true + } + } + // Check RFC 4193 addresses. + if len(ip.IP) == net.IPv6len { + switch true { + // Check for fd00::/8. + case ip.IP[0] == 0xfd && ip.IP[1] == 0x00: + return false + default: + return true + } + } + return false +} + +// ipsForHostname returns a slice of IPs to which the +// given hostname resolves. +func ipsForHostname(hostname string) ([]*net.IPNet, error) { + if ip := net.ParseIP(hostname); ip != nil { + return []*net.IPNet{oneAddressCIDR(ip)}, nil + } + ips, err := net.LookupIP(hostname) + if err != nil { + return nil, fmt.Errorf("failed to lookip IPs of hostname: %v", err) + } + nets := make([]*net.IPNet, len(ips)) + for i := range ips { + nets[i] = oneAddressCIDR(ips[i]) + } + return nets, nil +} + +// ipsForAllInterfaces returns a slice of IPs assigned to all the +// interfaces on the host. +func ipsForAllInterfaces() ([]*net.IPNet, error) { + ifaces, err := net.Interfaces() + if err != nil { + return nil, fmt.Errorf("failed to list interfaces: %v", err) + } + var nets []*net.IPNet + for _, iface := range ifaces { + ips, err := ipsForInterface(&iface) + if err != nil { + return nil, fmt.Errorf("failed to list addresses for %s: %v", iface.Name, err) + } + nets = append(nets, ips...) + } + return nets, nil +} + +// ipsForInterface returns a slice of IPs assigned to the given interface. 
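isPublic above hand-rolls the RFC 1918 (IPv4) and RFC 4193 (IPv6 ULA) checks. One detail worth calling out: 172.16.0.0/12 spans second octets 16 through 31, so the mask test for that block is ip4[1]&0xf0 == 16 (0x10). The standalone sketch below illustrates the intended IPv4 classification on example addresses; it is not the package's own function.

package main

import (
	"fmt"
	"net"
)

// isPrivateIPv4 reports whether ip falls in one of the RFC 1918 ranges.
func isPrivateIPv4(ip net.IP) bool {
	ip4 := ip.To4()
	if ip4 == nil {
		return false
	}
	switch {
	case ip4[0] == 10: // 10.0.0.0/8
		return true
	case ip4[0] == 172 && ip4[1]&0xf0 == 16: // 172.16.0.0/12
		return true
	case ip4[0] == 192 && ip4[1] == 168: // 192.168.0.0/16
		return true
	}
	return false
}

func main() {
	for _, s := range []string{"10.1.2.3", "172.20.0.1", "172.32.0.1", "192.168.1.1", "203.0.113.7"} {
		fmt.Printf("%s private=%v\n", s, isPrivateIPv4(net.ParseIP(s)))
	}
	// 172.20.0.1 is private (inside 172.16.0.0/12); 172.32.0.1 is not.
}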
+func ipsForInterface(iface *net.Interface) ([]*net.IPNet, error) { + link, err := netlink.LinkByIndex(iface.Index) + if err != nil { + return nil, fmt.Errorf("failed to get link: %s", err) + } + addrs, err := netlink.AddrList(link, netlink.FAMILY_ALL) + if err != nil { + return nil, fmt.Errorf("failed to list addresses for %s: %v", iface.Name, err) + } + var ips []*net.IPNet + for _, a := range addrs { + if a.IPNet != nil { + ips = append(ips, a.IPNet) + } + } + return ips, nil +} + +// interfacesForIP returns a slice of interfaces withthe given IP. +func interfacesForIP(ip *net.IPNet) ([]net.Interface, error) { + ifaces, err := net.Interfaces() + if err != nil { + return nil, fmt.Errorf("failed to list interfaces: %v", err) + } + var interfaces []net.Interface + for _, iface := range ifaces { + ips, err := ipsForInterface(&iface) + if err != nil { + return nil, fmt.Errorf("failed to list addresses for %s: %v", iface.Name, err) + } + for i := range ips { + if ip.IP.Equal(ips[i].IP) { + interfaces = append(interfaces, iface) + break + } + } + } + if len(interfaces) == 0 { + return nil, fmt.Errorf("no interface has %s assigned", ip.String()) + } + return interfaces, nil +} + +// defaultInterface returns the interface for the default route of the host. +func defaultInterface() (*net.Interface, error) { + routes, err := netlink.RouteList(nil, netlink.FAMILY_ALL) + if err != nil { + return nil, err + } + + for _, route := range routes { + if route.Dst == nil || route.Dst.String() == "0.0.0.0/0" || route.Dst.String() == "::/0" { + if route.LinkIndex <= 0 { + return nil, errors.New("failed to determine interface of route") + } + return net.InterfaceByIndex(route.LinkIndex) + } + } + + return nil, errors.New("failed to find default route") +} + +type allocator struct { + bits int + cidr *net.IPNet + current net.IP +} + +func newAllocator(cidr net.IPNet) *allocator { + _, bits := cidr.Mask.Size() + current := make(net.IP, len(cidr.IP)) + copy(current, cidr.IP) + if ip4 := current.To4(); ip4 != nil { + current = ip4 + } + + return &allocator{ + bits: bits, + cidr: &cidr, + current: current, + } +} + +func (a *allocator) next() *net.IPNet { + if a.current == nil { + return nil + } + for i := len(a.current) - 1; i >= 0; i-- { + a.current[i]++ + // if we haven't overflowed, then we can exit. + if a.current[i] != 0 { + break + } + } + if !a.cidr.Contains(a.current) { + a.current = nil + } + ip := make(net.IP, len(a.current)) + copy(ip, a.current) + + return &net.IPNet{IP: ip, Mask: net.CIDRMask(a.bits, a.bits)} +} diff --git a/pkg/mesh/ip_test.go b/pkg/mesh/ip_test.go new file mode 100644 index 0000000..605ba82 --- /dev/null +++ b/pkg/mesh/ip_test.go @@ -0,0 +1,75 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
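The allocator above hands out successive host addresses from a CIDR by incrementing the current IP byte-wise and stopping once the address leaves the range. Because the type is unexported, the illustration below is written as a hypothetical test that would sit alongside ip.go in package mesh; the CIDR is an example.

package mesh

import (
	"net"
	"testing"
)

// TestAllocatorSketch is a hypothetical test illustrating the allocator:
// starting from the network address, next() returns .1, .2, ... as /32s.
func TestAllocatorSketch(t *testing.T) {
	_, cidr, err := net.ParseCIDR("10.2.1.0/24")
	if err != nil {
		t.Fatal(err)
	}
	a := newAllocator(*cidr)
	first := a.next()
	if first == nil || first.String() != "10.2.1.1/32" {
		t.Errorf("expected 10.2.1.1/32, got %v", first)
	}
	second := a.next()
	if second == nil || second.String() != "10.2.1.2/32" {
		t.Errorf("expected 10.2.1.2/32, got %v", second)
	}
}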
+ +package mesh + +import ( + "net" + "testing" +) + +func TestSortIPs(t *testing.T) { + ip1 := oneAddressCIDR(net.ParseIP("10.0.0.1")) + ip2 := oneAddressCIDR(net.ParseIP("10.0.0.2")) + ip3 := oneAddressCIDR(net.ParseIP("192.168.0.1")) + ip4 := oneAddressCIDR(net.ParseIP("2001::7")) + ip5 := oneAddressCIDR(net.ParseIP("fd68:da49:09da:b27f::")) + for _, tc := range []struct { + name string + ips []*net.IPNet + out []*net.IPNet + }{ + { + name: "single", + ips: []*net.IPNet{ip1}, + out: []*net.IPNet{ip1}, + }, + { + name: "IPv4s", + ips: []*net.IPNet{ip2, ip3, ip1}, + out: []*net.IPNet{ip1, ip2, ip3}, + }, + { + name: "IPv4 and IPv6", + ips: []*net.IPNet{ip4, ip1}, + out: []*net.IPNet{ip1, ip4}, + }, + { + name: "IPv6s", + ips: []*net.IPNet{ip5, ip4}, + out: []*net.IPNet{ip4, ip5}, + }, + { + name: "all", + ips: []*net.IPNet{ip3, ip4, ip2, ip5, ip1}, + out: []*net.IPNet{ip1, ip2, ip3, ip4, ip5}, + }, + } { + sortIPs(tc.ips) + equal := true + if len(tc.ips) != len(tc.out) { + equal = false + } else { + for i := range tc.ips { + if !ipNetsEqual(tc.ips[i], tc.out[i]) { + equal = false + break + } + } + } + if !equal { + t.Errorf("test case %q: expected %s, got %s", tc.name, tc.out, tc.ips) + } + } +} diff --git a/pkg/mesh/mesh.go b/pkg/mesh/mesh.go new file mode 100644 index 0000000..14189c8 --- /dev/null +++ b/pkg/mesh/mesh.go @@ -0,0 +1,581 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mesh + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/vishvananda/netlink" + + "github.com/squat/kilo/pkg/iproute" + "github.com/squat/kilo/pkg/ipset" + "github.com/squat/kilo/pkg/iptables" + "github.com/squat/kilo/pkg/route" + "github.com/squat/kilo/pkg/wireguard" +) + +const resyncPeriod = 30 * time.Second + +const ( + // KiloPath is the directory where Kilo stores its configuration. + KiloPath = "/var/lib/kilo" + // PrivateKeyPath is the filepath where the WireGuard private key is stored. + PrivateKeyPath = KiloPath + "/key" + // ConfPath is the filepath where the WireGuard configuration is stored. + ConfPath = KiloPath + "/conf" +) + +// Granularity represents the abstraction level at which the network +// should be meshed. +type Granularity string + +// Encapsulate identifies what packets within a location should +// be encapsulated. +type Encapsulate string + +const ( + // DataCenterGranularity indicates that the network should create + // a mesh between data-centers but not between nodes within a + // single data-center. + DataCenterGranularity Granularity = "data-center" + // NodeGranularity indicates that the network should create + // a mesh between every node. + NodeGranularity Granularity = "node" + // NeverEncapsulate indicates that no packets within a location + // should be encapsulated. 
+ NeverEncapsulate Encapsulate = "never" + // CrossSubnetEncapsulate indicates that only packets that + // traverse subnets within a location should be encapsulated. + CrossSubnetEncapsulate Encapsulate = "crosssubnet" + // AlwaysEncapsulate indicates that all packets within a location + // should be encapsulated. + AlwaysEncapsulate Encapsulate = "always" +) + +// Node represents a node in the network. +type Node struct { + ExternalIP *net.IPNet + Key []byte + InternalIP *net.IPNet + // Leader is a suggestion to Kilo that + // the node wants to lead its segment. + Leader bool + Location string + Name string + Subnet *net.IPNet +} + +// Ready indicates whether or not the node is ready. +func (n *Node) Ready() bool { + return n != nil && n.ExternalIP != nil && n.Key != nil && n.InternalIP != nil && n.Subnet != nil +} + +// EventType describes what kind of an action an event represents. +type EventType string + +const ( + // AddEvent represents an action where an item was added. + AddEvent EventType = "add" + // DeleteEvent represents an action where an item was removed. + DeleteEvent EventType = "delete" + // UpdateEvent represents an action where an item was updated. + UpdateEvent EventType = "update" +) + +// Event represents an update event concerning a node in the cluster. +type Event struct { + Type EventType + Node *Node +} + +// Backend can get nodes by name, init itself, +// list the nodes that should be meshed, +// set Kilo properties for a node, +// clean up any changes applied to the backend, +// and watch for changes to nodes. +type Backend interface { + CleanUp(string) error + Get(string) (*Node, error) + Init(<-chan struct{}) error + List() ([]*Node, error) + Set(string, *Node) error + Watch() <-chan *Event +} + +// Mesh is able to create Kilo network meshes. +type Mesh struct { + Backend + encapsulate Encapsulate + externalIP *net.IPNet + granularity Granularity + hostname string + internalIP *net.IPNet + ipset *ipset.Set + ipTables *iptables.Controller + kiloIface int + key []byte + local bool + port int + priv []byte + privIface int + pub []byte + pubIface int + stop chan struct{} + subnet *net.IPNet + table *route.Table + tunlIface int + + // nodes is a mutable field in the struct + // and needs to be guarded. + nodes map[string]*Node + mu sync.Mutex + + errorCounter *prometheus.CounterVec + nodesGuage prometheus.Gauge + logger log.Logger +} + +// New returns a new Mesh instance. 
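The Backend interface above is the only coupling between the mesh logic and Kubernetes, which makes it straightforward to substitute an in-memory implementation in tests. The following is a hedged sketch of such a fake, not part of this commit; it satisfies the interface but performs no real synchronization.

package mesh

// fakeBackend is a hypothetical in-memory Backend for tests.
type fakeBackend struct {
	nodes  map[string]*Node
	events chan *Event
}

func newFakeBackend() *fakeBackend {
	return &fakeBackend{
		nodes: make(map[string]*Node),
		// Buffered so that Set does not block in simple tests.
		events: make(chan *Event, 10),
	}
}

func (f *fakeBackend) CleanUp(name string) error { delete(f.nodes, name); return nil }

func (f *fakeBackend) Get(name string) (*Node, error) { return f.nodes[name], nil }

func (f *fakeBackend) Init(stop <-chan struct{}) error { return nil }

func (f *fakeBackend) List() ([]*Node, error) {
	ns := make([]*Node, 0, len(f.nodes))
	for _, n := range f.nodes {
		ns = append(ns, n)
	}
	return ns, nil
}

func (f *fakeBackend) Set(name string, node *Node) error {
	f.nodes[name] = node
	f.events <- &Event{Type: UpdateEvent, Node: node}
	return nil
}

func (f *fakeBackend) Watch() <-chan *Event { return f.events }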
+func New(backend Backend, encapsulate Encapsulate, granularity Granularity, hostname string, port int, subnet *net.IPNet, local bool, logger log.Logger) (*Mesh, error) { + if err := os.MkdirAll(KiloPath, 0700); err != nil { + return nil, fmt.Errorf("failed to create directory to store configuration: %v", err) + } + private, err := ioutil.ReadFile(PrivateKeyPath) + if err != nil { + level.Warn(logger).Log("msg", "no private key found on disk; generating one now") + if private, err = wireguard.GenKey(); err != nil { + return nil, err + } + } + public, err := wireguard.PubKey(private) + if err != nil { + return nil, err + } + if err := ioutil.WriteFile(PrivateKeyPath, private, 0600); err != nil { + return nil, fmt.Errorf("failed to write private key to disk: %v", err) + } + privateIP, publicIP, err := getIP(hostname) + if err != nil { + return nil, fmt.Errorf("failed to find public IP: %v", err) + } + ifaces, err := interfacesForIP(privateIP) + if err != nil { + return nil, fmt.Errorf("failed to find interface for private IP: %v", err) + } + privIface := ifaces[0].Index + ifaces, err = interfacesForIP(publicIP) + if err != nil { + return nil, fmt.Errorf("failed to find interface for public IP: %v", err) + } + pubIface := ifaces[0].Index + kiloIface, err := wireguard.New("kilo") + if err != nil { + return nil, fmt.Errorf("failed to create WireGuard interface: %v", err) + } + var tunlIface int + if encapsulate != NeverEncapsulate { + if tunlIface, err = iproute.NewIPIP(privIface); err != nil { + return nil, fmt.Errorf("failed to create tunnel interface: %v", err) + } + if err := iproute.Set(tunlIface, true); err != nil { + return nil, fmt.Errorf("failed to set tunnel interface up: %v", err) + } + } + level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the private IP address", privateIP.String())) + level.Debug(logger).Log("msg", fmt.Sprintf("using %s as the public IP address", publicIP.String())) + ipTables, err := iptables.New(len(subnet.IP)) + if err != nil { + return nil, fmt.Errorf("failed to IP tables controller: %v", err) + } + return &Mesh{ + Backend: backend, + encapsulate: encapsulate, + externalIP: publicIP, + granularity: granularity, + hostname: hostname, + internalIP: privateIP, + // This is a patch until Calico supports + // other hosts adding IPIP iptables rules. + ipset: ipset.New("cali40all-hosts-net"), + ipTables: ipTables, + kiloIface: kiloIface, + nodes: make(map[string]*Node), + port: port, + priv: private, + privIface: privIface, + pub: public, + pubIface: pubIface, + local: local, + stop: make(chan struct{}), + subnet: subnet, + table: route.NewTable(), + tunlIface: tunlIface, + errorCounter: prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "kilo_errors_total", + Help: "Number of errors that occurred while administering the mesh.", + }, []string{"event"}), + nodesGuage: prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "kilo_nodes", + Help: "Number of in the mesh.", + }), + logger: logger, + }, nil +} + +// Run starts the mesh. 
+func (m *Mesh) Run() error { + if err := m.Init(m.stop); err != nil { + return fmt.Errorf("failed to initialize backend: %v", err) + } + ipsetErrors, err := m.ipset.Run(m.stop) + if err != nil { + return fmt.Errorf("failed to watch for ipset updates: %v", err) + } + ipTablesErrors, err := m.ipTables.Run(m.stop) + if err != nil { + return fmt.Errorf("failed to watch for IP tables updates: %v", err) + } + routeErrors, err := m.table.Run(m.stop) + if err != nil { + return fmt.Errorf("failed to watch for route table updates: %v", err) + } + go func() { + for { + var err error + select { + case err = <-ipsetErrors: + case err = <-ipTablesErrors: + case err = <-routeErrors: + case <-m.stop: + return + } + if err != nil { + level.Error(m.logger).Log("error", err) + m.errorCounter.WithLabelValues("run").Inc() + } + } + }() + defer m.cleanUp() + t := time.NewTimer(resyncPeriod) + w := m.Watch() + for { + var e *Event + select { + case e = <-w: + m.sync(e) + case <-t.C: + m.applyTopology() + t.Reset(resyncPeriod) + case <-m.stop: + return nil + } + } +} + +func (m *Mesh) sync(e *Event) { + logger := log.With(m.logger, "event", e.Type) + level.Debug(logger).Log("msg", "syncing", "event", e.Type) + if isSelf(m.hostname, e.Node) { + level.Debug(logger).Log("msg", "processing local node", "node", e.Node) + m.handleLocal(e.Node) + return + } + var diff bool + m.mu.Lock() + if !e.Node.Ready() { + level.Debug(logger).Log("msg", "received incomplete node", "node", e.Node) + // An existing node is no longer valid + // so remove it from the mesh. + if _, ok := m.nodes[e.Node.Name]; ok { + level.Info(logger).Log("msg", "node is no longer in the mesh", "node", e.Node) + delete(m.nodes, e.Node.Name) + diff = true + } + } else { + switch e.Type { + case AddEvent: + fallthrough + case UpdateEvent: + if !nodesAreEqual(m.nodes[e.Node.Name], e.Node) { + m.nodes[e.Node.Name] = e.Node + diff = true + } + case DeleteEvent: + delete(m.nodes, e.Node.Name) + diff = true + } + } + m.mu.Unlock() + if diff { + level.Info(logger).Log("node", e.Node) + m.applyTopology() + } +} + +func (m *Mesh) handleLocal(n *Node) { + // Allow the external IP to be overridden. + if n.ExternalIP == nil { + n.ExternalIP = m.externalIP + } + // Compare the given node to the calculated local node. + // Take leader, location, and subnet from the argument, as these + // are not determined by kilo. + local := &Node{ExternalIP: n.ExternalIP, Key: m.pub, InternalIP: m.internalIP, Leader: n.Leader, Location: n.Location, Name: m.hostname, Subnet: n.Subnet} + if !nodesAreEqual(n, local) { + level.Debug(m.logger).Log("msg", "local node differs from backend") + if err := m.Set(m.hostname, local); err != nil { + level.Error(m.logger).Log("error", fmt.Sprintf("failed to set local node: %v", err), "node", local) + m.errorCounter.WithLabelValues("local").Inc() + return + } + level.Debug(m.logger).Log("msg", "successfully reconciled local node against backend") + } + m.mu.Lock() + n = m.nodes[m.hostname] + if n == nil { + n = &Node{} + } + m.mu.Unlock() + if !nodesAreEqual(n, local) { + m.mu.Lock() + m.nodes[local.Name] = local + m.mu.Unlock() + m.applyTopology() + } +} + +func (m *Mesh) applyTopology() { + m.mu.Lock() + defer m.mu.Unlock() + // Ensure all unready nodes are removed. + var ready float64 + for n := range m.nodes { + if !m.nodes[n].Ready() { + delete(m.nodes, n) + continue + } + ready++ + } + m.nodesGuage.Set(ready) + // We cannot do anything with the topology until the local node is available. 
+	if m.nodes[m.hostname] == nil {
+		return
+	}
+	t, err := NewTopology(m.nodes, m.granularity, m.hostname, m.port, m.priv, m.subnet)
+	if err != nil {
+		level.Error(m.logger).Log("error", err)
+		m.errorCounter.WithLabelValues("apply").Inc()
+		return
+	}
+	conf, err := t.Conf()
+	if err != nil {
+		level.Error(m.logger).Log("error", err)
+		m.errorCounter.WithLabelValues("apply").Inc()
+		return
+	}
+	if err := ioutil.WriteFile(ConfPath, conf, 0600); err != nil {
+		level.Error(m.logger).Log("error", err)
+		m.errorCounter.WithLabelValues("apply").Inc()
+		return
+	}
+	var private *net.IPNet
+	// If we are not encapsulating packets to the local private network,
+	// then pass the private IP to add an exception to the NAT rule.
+	if m.encapsulate != AlwaysEncapsulate {
+		private = t.privateIP
+	}
+	rules := iptables.MasqueradeRules(private, m.nodes[m.hostname].Subnet, t.RemoteSubnets())
+	rules = append(rules, iptables.ForwardRules(m.subnet)...)
+	if err := m.ipTables.Set(rules); err != nil {
+		level.Error(m.logger).Log("error", err)
+		m.errorCounter.WithLabelValues("apply").Inc()
+		return
+	}
+	if m.encapsulate != NeverEncapsulate {
+		var peers []net.IP
+		for _, s := range t.Segments {
+			if s.Location == m.nodes[m.hostname].Location {
+				peers = s.privateIPs
+				break
+			}
+		}
+		if err := m.ipset.Set(peers); err != nil {
+			level.Error(m.logger).Log("error", err)
+			m.errorCounter.WithLabelValues("apply").Inc()
+			return
+		}
+		if m.local {
+			if err := iproute.SetAddress(m.tunlIface, oneAddressCIDR(newAllocator(*m.nodes[m.hostname].Subnet).next().IP)); err != nil {
+				level.Error(m.logger).Log("error", err)
+				m.errorCounter.WithLabelValues("apply").Inc()
+				return
+			}
+		}
+	}
+	if t.leader {
+		if err := iproute.SetAddress(m.kiloIface, t.wireGuardCIDR); err != nil {
+			level.Error(m.logger).Log("error", err)
+			m.errorCounter.WithLabelValues("apply").Inc()
+			return
+		}
+		link, err := linkByIndex(m.kiloIface)
+		if err != nil {
+			level.Error(m.logger).Log("error", err)
+			m.errorCounter.WithLabelValues("apply").Inc()
+			return
+		}
+		oldConf, err := wireguard.ShowConf(link.Attrs().Name)
+		if err != nil {
+			level.Error(m.logger).Log("error", err)
+			m.errorCounter.WithLabelValues("apply").Inc()
+			return
+		}
+		// Setting the WireGuard configuration interrupts existing connections,
+		// so only set the configuration if it has changed.
+		equal, err := wireguard.CompareConf(conf, oldConf)
+		if err != nil {
+			level.Error(m.logger).Log("error", err)
+			m.errorCounter.WithLabelValues("apply").Inc()
+			// Don't return here; simply overwrite the old configuration.
+			equal = false
+		}
+		if !equal {
+			if err := wireguard.SetConf(link.Attrs().Name, ConfPath); err != nil {
+				level.Error(m.logger).Log("error", err)
+				m.errorCounter.WithLabelValues("apply").Inc()
+				return
+			}
+		}
+		if err := iproute.Set(m.kiloIface, true); err != nil {
+			level.Error(m.logger).Log("error", err)
+			m.errorCounter.WithLabelValues("apply").Inc()
+			return
+		}
+	} else {
+		level.Debug(m.logger).Log("msg", "local node is not the leader")
+		if err := iproute.Set(m.kiloIface, false); err != nil {
+			level.Error(m.logger).Log("error", err)
+			m.errorCounter.WithLabelValues("apply").Inc()
+			return
+		}
+	}
+	// We need to add routes last since they may depend
+	// on the WireGuard interface.
+ routes := t.Routes(m.kiloIface, m.privIface, m.tunlIface, m.local, m.encapsulate) + if err := m.table.Set(routes); err != nil { + level.Error(m.logger).Log("error", err) + m.errorCounter.WithLabelValues("apply").Inc() + } +} + +// RegisterMetrics registers Prometheus metrics on the given Prometheus +// registerer. +func (m *Mesh) RegisterMetrics(r prometheus.Registerer) { + r.MustRegister( + m.errorCounter, + m.nodesGuage, + ) +} + +// Stop stops the mesh. +func (m *Mesh) Stop() { + close(m.stop) +} + +func (m *Mesh) cleanUp() { + if err := m.ipTables.CleanUp(); err != nil { + level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up IP tables: %v", err)) + m.errorCounter.WithLabelValues("cleanUp").Inc() + } + if err := m.table.CleanUp(); err != nil { + level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up routes: %v", err)) + m.errorCounter.WithLabelValues("cleanUp").Inc() + } + if err := os.Remove(PrivateKeyPath); err != nil { + level.Error(m.logger).Log("error", fmt.Sprintf("failed to delete private key: %v", err)) + m.errorCounter.WithLabelValues("cleanUp").Inc() + } + if err := os.Remove(ConfPath); err != nil { + level.Error(m.logger).Log("error", fmt.Sprintf("failed to delete configuration file: %v", err)) + m.errorCounter.WithLabelValues("cleanUp").Inc() + } + if err := iproute.RemoveInterface(m.kiloIface); err != nil { + level.Error(m.logger).Log("error", fmt.Sprintf("failed to remove wireguard interface: %v", err)) + m.errorCounter.WithLabelValues("cleanUp").Inc() + } + if err := m.CleanUp(m.hostname); err != nil { + level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up backend: %v", err)) + m.errorCounter.WithLabelValues("cleanUp").Inc() + } + if err := m.ipset.CleanUp(); err != nil { + level.Error(m.logger).Log("error", fmt.Sprintf("failed to clean up ipset: %v", err)) + m.errorCounter.WithLabelValues("cleanUp").Inc() + } +} + +func isSelf(hostname string, node *Node) bool { + return node != nil && node.Name == hostname +} + +func nodesAreEqual(a, b *Node) bool { + if !(a != nil) == (b != nil) { + return false + } + if a == b { + return true + } + return ipNetsEqual(a.ExternalIP, b.ExternalIP) && string(a.Key) == string(b.Key) && ipNetsEqual(a.InternalIP, b.InternalIP) && a.Leader == b.Leader && a.Location == b.Location && a.Name == b.Name && subnetsEqual(a.Subnet, b.Subnet) +} + +func ipNetsEqual(a, b *net.IPNet) bool { + if a == nil && b == nil { + return true + } + if (a != nil) != (b != nil) { + return false + } + if a.Mask.String() != b.Mask.String() { + return false + } + return a.IP.Equal(b.IP) +} + +func subnetsEqual(a, b *net.IPNet) bool { + if a.Mask.String() != b.Mask.String() { + return false + } + if !a.Contains(b.IP) { + return false + } + if !b.Contains(a.IP) { + return false + } + return true +} + +func linkByIndex(index int) (netlink.Link, error) { + link, err := netlink.LinkByIndex(index) + if err != nil { + return nil, fmt.Errorf("failed to get interface: %v", err) + } + return link, nil +} diff --git a/pkg/mesh/mesh_test.go b/pkg/mesh/mesh_test.go new file mode 100644 index 0000000..8bb1d75 --- /dev/null +++ b/pkg/mesh/mesh_test.go @@ -0,0 +1,146 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mesh + +import ( + "net" + "testing" +) + +func TestNewAllocator(t *testing.T) { + _, c1, err := net.ParseCIDR("10.1.0.0/16") + if err != nil { + t.Fatalf("failed to parse CIDR: %v", err) + } + a1 := newAllocator(*c1) + _, c2, err := net.ParseCIDR("10.1.0.0/32") + if err != nil { + t.Fatalf("failed to parse CIDR: %v", err) + } + a2 := newAllocator(*c2) + _, c3, err := net.ParseCIDR("10.1.0.0/31") + if err != nil { + t.Fatalf("failed to parse CIDR: %v", err) + } + a3 := newAllocator(*c3) + for _, tc := range []struct { + name string + a *allocator + next string + }{ + { + name: "10.1.0.0/16 first", + a: a1, + next: "10.1.0.1/32", + }, + { + name: "10.1.0.0/16 second", + a: a1, + next: "10.1.0.2/32", + }, + { + name: "10.1.0.0/32", + a: a2, + next: "", + }, + { + name: "10.1.0.0/31 first", + a: a3, + next: "10.1.0.1/32", + }, + { + name: "10.1.0.0/31 second", + a: a3, + next: "", + }, + } { + next := tc.a.next() + if next.String() != tc.next { + t.Errorf("test case %q: expected %s, got %s", tc.name, tc.next, next.String()) + } + } +} + +func TestReady(t *testing.T) { + internalIP := oneAddressCIDR(net.ParseIP("1.1.1.1")) + externalIP := oneAddressCIDR(net.ParseIP("2.2.2.2")) + for _, tc := range []struct { + name string + node *Node + ready bool + }{ + { + name: "nil", + node: nil, + ready: false, + }, + { + name: "empty fields", + node: &Node{}, + ready: false, + }, + { + name: "empty external IP", + node: &Node{ + InternalIP: internalIP, + Key: []byte{}, + Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + }, + ready: false, + }, + { + name: "empty internal IP", + node: &Node{ + ExternalIP: externalIP, + Key: []byte{}, + Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + }, + ready: false, + }, + { + name: "empty key", + node: &Node{ + ExternalIP: externalIP, + InternalIP: internalIP, + Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + }, + ready: false, + }, + { + name: "empty subnet", + node: &Node{ + ExternalIP: externalIP, + InternalIP: internalIP, + Key: []byte{}, + }, + ready: false, + }, + { + name: "valid", + node: &Node{ + ExternalIP: externalIP, + InternalIP: internalIP, + Key: []byte{}, + Subnet: &net.IPNet{IP: net.ParseIP("10.2.0.0"), Mask: net.CIDRMask(16, 32)}, + }, + ready: true, + }, + } { + ready := tc.node.Ready() + if ready != tc.ready { + t.Errorf("test case %q: expected %t, got %t", tc.name, tc.ready, ready) + } + } +} diff --git a/pkg/mesh/topology.go b/pkg/mesh/topology.go new file mode 100644 index 0000000..2ee4307 --- /dev/null +++ b/pkg/mesh/topology.go @@ -0,0 +1,334 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mesh + +import ( + "bytes" + "errors" + "fmt" + "net" + "sort" + "strings" + "text/template" + + "github.com/vishvananda/netlink" + "golang.org/x/sys/unix" +) + +var ( + confTemplate = template.Must(template.New("").Parse(`[Interface] +PrivateKey = {{.Key}} +ListenPort = {{.Port}} +{{range .Segments -}} +{{if ne .Location $.Location}} +[Peer] +PublicKey = {{.Key}} +Endpoint = {{.Endpoint}}:{{$.Port}} +AllowedIPs = {{.AllowedIPs}} +{{end}} +{{- end -}} +`)) +) + +// Topology represents the logical structure of the overlay network. +type Topology struct { + // Some fields need to be exported so that the template can read them. + Key string + Port int + // Location is the logical location of the local host. + Location string + Segments []*segment + + // hostname is the hostname of the local host. + hostname string + // leader represents whether or not the local host + // is the segment leader. + leader bool + // subnet is the entire subnet from which IPs + // for the WireGuard interfaces will be allocated. + subnet *net.IPNet + // privateIP is the private IP address of the local node. + privateIP *net.IPNet + // wireGuardCIDR is the allocated CIDR of the WireGuard + // interface of the local node. If the local node is not + // the leader, then it is nil. + wireGuardCIDR *net.IPNet +} + +type segment struct { + // Some fields need to be exported so that the template can read them. + AllowedIPs string + Endpoint string + Key string + // Location is the logical location of this segment. + Location string + + // cidrs is a slice of subnets of all peers in the segment. + cidrs []*net.IPNet + // hostnames is a slice of the hostnames of the peers in the segment. + hostnames []string + // leader is the index of the leader of the segment. + leader int + // privateIPs is a slice of private IPs of all peers in the segment. + privateIPs []net.IP + // wireGuardIP is the allocated IP address of the WireGuard + // interface on the leader of the segment. + wireGuardIP net.IP +} + +// NewTopology creates a new Topology struct from a given set of nodes. +func NewTopology(nodes map[string]*Node, granularity Granularity, hostname string, port int, key []byte, subnet *net.IPNet) (*Topology, error) { + topoMap := make(map[string][]*Node) + for _, node := range nodes { + var location string + switch granularity { + case DataCenterGranularity: + location = node.Location + case NodeGranularity: + location = node.Name + } + topoMap[location] = append(topoMap[location], node) + } + var localLocation string + switch granularity { + case DataCenterGranularity: + localLocation = nodes[hostname].Location + case NodeGranularity: + localLocation = hostname + } + + t := Topology{Key: strings.TrimSpace(string(key)), Port: port, hostname: hostname, Location: localLocation, subnet: subnet, privateIP: nodes[hostname].InternalIP} + for location := range topoMap { + // Sort the location so the result is stable. 
+ sort.Slice(topoMap[location], func(i, j int) bool { + return topoMap[location][i].Name < topoMap[location][j].Name + }) + leader := findLeader(topoMap[location]) + if location == localLocation && topoMap[location][leader].Name == hostname { + t.leader = true + } + var allowedIPs []string + var cidrs []*net.IPNet + var hostnames []string + var privateIPs []net.IP + for _, node := range topoMap[location] { + // Allowed IPs should include: + // - the node's allocated subnet + // - the node's WireGuard IP + // - the node's internal IP + allowedIPs = append(allowedIPs, node.Subnet.String(), oneAddressCIDR(node.InternalIP.IP).String()) + cidrs = append(cidrs, node.Subnet) + hostnames = append(hostnames, node.Name) + privateIPs = append(privateIPs, node.InternalIP.IP) + } + t.Segments = append(t.Segments, &segment{ + AllowedIPs: strings.Join(allowedIPs, ", "), + Endpoint: topoMap[location][leader].ExternalIP.IP.String(), + Key: strings.TrimSpace(string(topoMap[location][leader].Key)), + Location: location, + cidrs: cidrs, + hostnames: hostnames, + leader: leader, + privateIPs: privateIPs, + }) + } + // Sort the Topology so the result is stable. + sort.Slice(t.Segments, func(i, j int) bool { + return t.Segments[i].Location < t.Segments[j].Location + }) + + // Allocate IPs to the segment leaders in a stable, coordination-free manner. + a := newAllocator(*subnet) + for _, segment := range t.Segments { + ipNet := a.next() + if ipNet == nil { + return nil, errors.New("failed to allocate an IP address; ran out of IP addresses") + } + segment.wireGuardIP = ipNet.IP + segment.AllowedIPs = fmt.Sprintf("%s, %s", segment.AllowedIPs, ipNet.String()) + if t.leader && segment.Location == t.Location { + t.wireGuardCIDR = &net.IPNet{IP: ipNet.IP, Mask: t.subnet.Mask} + } + } + + return &t, nil +} + +// RemoteSubnets identifies the subnets of the hosts in segments different than the host's. +func (t *Topology) RemoteSubnets() []*net.IPNet { + var remote []*net.IPNet + for _, s := range t.Segments { + if s == nil || s.Location == t.Location { + continue + } + remote = append(remote, s.cidrs...) + } + return remote +} + +// Routes generates a slice of routes for a given Topology. +func (t *Topology) Routes(kiloIface, privIface, tunlIface int, local bool, encapsulate Encapsulate) []*netlink.Route { + var routes []*netlink.Route + if !t.leader { + // Find the leader for this segment. + var leader net.IP + for _, segment := range t.Segments { + if segment.Location == t.Location { + leader = segment.privateIPs[segment.leader] + break + } + } + for _, segment := range t.Segments { + // First, add a route to the WireGuard IP of the segment. + routes = append(routes, encapsulateRoute(&netlink.Route{ + Dst: oneAddressCIDR(segment.wireGuardIP), + Flags: int(netlink.FLAG_ONLINK), + Gw: leader, + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, encapsulate, t.privateIP, tunlIface)) + // Add routes for the current segment if local is true. + if segment.Location == t.Location { + if local { + for i := range segment.cidrs { + // Don't add routes for the local node. + if segment.privateIPs[i].Equal(t.privateIP.IP) { + continue + } + routes = append(routes, encapsulateRoute(&netlink.Route{ + Dst: segment.cidrs[i], + Flags: int(netlink.FLAG_ONLINK), + Gw: segment.privateIPs[i], + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, encapsulate, t.privateIP, tunlIface)) + } + } + continue + } + for i := range segment.cidrs { + // Add routes to the Pod CIDRs of nodes in other segments. 
+ routes = append(routes, encapsulateRoute(&netlink.Route{ + Dst: segment.cidrs[i], + Flags: int(netlink.FLAG_ONLINK), + Gw: leader, + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, encapsulate, t.privateIP, tunlIface)) + // Add routes to the private IPs of nodes in other segments. + // Number of CIDRs and private IPs always match so + // we can reuse the loop. + routes = append(routes, encapsulateRoute(&netlink.Route{ + Dst: oneAddressCIDR(segment.privateIPs[i]), + Flags: int(netlink.FLAG_ONLINK), + Gw: leader, + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, encapsulate, t.privateIP, tunlIface)) + } + } + return routes + } + for _, segment := range t.Segments { + // Add routes for the current segment if local is true. + if segment.Location == t.Location { + if local { + for i := range segment.cidrs { + // Don't add routes for the local node. + if segment.privateIPs[i].Equal(t.privateIP.IP) { + continue + } + routes = append(routes, encapsulateRoute(&netlink.Route{ + Dst: segment.cidrs[i], + Flags: int(netlink.FLAG_ONLINK), + Gw: segment.privateIPs[i], + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, encapsulate, t.privateIP, tunlIface)) + } + } + continue + } + for i := range segment.cidrs { + // Add routes to the Pod CIDRs of nodes in other segments. + routes = append(routes, &netlink.Route{ + Dst: segment.cidrs[i], + Flags: int(netlink.FLAG_ONLINK), + Gw: segment.wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }) + // Add routes to the private IPs of nodes in other segments. + // Number of CIDRs and private IPs always match so + // we can reuse the loop. + routes = append(routes, &netlink.Route{ + Dst: oneAddressCIDR(segment.privateIPs[i]), + Flags: int(netlink.FLAG_ONLINK), + Gw: segment.wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }) + } + } + return routes +} + +func encapsulateRoute(route *netlink.Route, encapsulate Encapsulate, subnet *net.IPNet, tunlIface int) *netlink.Route { + if encapsulate == AlwaysEncapsulate || (encapsulate == CrossSubnetEncapsulate && !subnet.Contains(route.Gw)) { + route.LinkIndex = tunlIface + } + return route +} + +// Conf generates a WireGuard configuration file for a given Topology. +func (t *Topology) Conf() ([]byte, error) { + conf := new(bytes.Buffer) + if err := confTemplate.Execute(conf, t); err != nil { + return nil, err + } + return conf.Bytes(), nil +} + +// oneAddressCIDR takes an IP address and returns a CIDR +// that contains only that address. 
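+// A 4-byte IPv4 address yields a /32 mask and a 16-byte IPv6 address a /128.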
+func oneAddressCIDR(ip net.IP) *net.IPNet {
+	return &net.IPNet{IP: ip, Mask: net.CIDRMask(len(ip)*8, len(ip)*8)}
+}
+
+// findLeader selects a leader for the nodes in a segment;
+// it will select the first node that says it should lead
+// or the first node in the segment if none have volunteered,
+// always preferring those with a public external IP address.
+func findLeader(nodes []*Node) int {
+	var leaders, public []int
+	for i := range nodes {
+		if nodes[i].Leader {
+			if isPublic(nodes[i].ExternalIP) {
+				return i
+			}
+			leaders = append(leaders, i)
+		}
+		if isPublic(nodes[i].ExternalIP) {
+			public = append(public, i)
+		}
+	}
+	if len(leaders) != 0 {
+		return leaders[0]
+	}
+	if len(public) != 0 {
+		return public[0]
+	}
+	return 0
+}
diff --git a/pkg/mesh/topology_test.go b/pkg/mesh/topology_test.go
new file mode 100644
index 0000000..c310e23
--- /dev/null
+++ b/pkg/mesh/topology_test.go
@@ -0,0 +1,982 @@
+// Copyright 2019 the Kilo authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mesh
+
+import (
+	"net"
+	"strings"
+	"testing"
+
+	"github.com/kylelemons/godebug/pretty"
+	"github.com/vishvananda/netlink"
+	"golang.org/x/sys/unix"
+)
+
+func allowedIPs(ips ...string) string {
+	return strings.Join(ips, ", ")
+}
+
+func setup(t *testing.T) (map[string]*Node, []byte, int, *net.IPNet) {
+	key := []byte("private")
+	port := 51820
+	_, kiloNet, err := net.ParseCIDR("10.4.0.0/16")
+	if err != nil {
+		t.Fatalf("failed to parse Kilo subnet CIDR: %v", err)
+	}
+	ip, e1, err := net.ParseCIDR("10.1.0.1/16")
+	if err != nil {
+		t.Fatalf("failed to parse external IP CIDR: %v", err)
+	}
+	e1.IP = ip
+	ip, e2, err := net.ParseCIDR("10.1.0.2/16")
+	if err != nil {
+		t.Fatalf("failed to parse external IP CIDR: %v", err)
+	}
+	e2.IP = ip
+	ip, e3, err := net.ParseCIDR("10.1.0.3/16")
+	if err != nil {
+		t.Fatalf("failed to parse external IP CIDR: %v", err)
+	}
+	e3.IP = ip
+	ip, i1, err := net.ParseCIDR("192.168.0.1/24")
+	if err != nil {
+		t.Fatalf("failed to parse internal IP CIDR: %v", err)
+	}
+	i1.IP = ip
+	ip, i2, err := net.ParseCIDR("192.168.0.2/24")
+	if err != nil {
+		t.Fatalf("failed to parse internal IP CIDR: %v", err)
+	}
+	i2.IP = ip
+	nodes := map[string]*Node{
+		"a": {
+			Name:       "a",
+			ExternalIP: e1,
+			InternalIP: i1,
+			Location:   "1",
+			Subnet:     &net.IPNet{IP: net.ParseIP("10.2.1.0"), Mask: net.CIDRMask(24, 32)},
+			Key:        []byte("key1"),
+		},
+		"b": {
+			Name:       "b",
+			ExternalIP: e2,
+			InternalIP: i1,
+			Location:   "2",
+			Subnet:     &net.IPNet{IP: net.ParseIP("10.2.2.0"), Mask: net.CIDRMask(24, 32)},
+			Key:        []byte("key2"),
+		},
+		"c": {
+			Name:       "c",
+			ExternalIP: e3,
+			InternalIP: i2,
+			// Same location as node b.
+ Location: "2", + Subnet: &net.IPNet{IP: net.ParseIP("10.2.3.0"), Mask: net.CIDRMask(24, 32)}, + Key: []byte("key3"), + }, + } + return nodes, key, port, kiloNet +} + +func TestNewTopology(t *testing.T) { + nodes, key, port, kiloNet := setup(t) + + w1 := net.ParseIP("10.4.0.1").To4() + w2 := net.ParseIP("10.4.0.2").To4() + w3 := net.ParseIP("10.4.0.3").To4() + for _, tc := range []struct { + name string + granularity Granularity + hostname string + result *Topology + }{ + { + name: "datacenter from a", + granularity: DataCenterGranularity, + hostname: nodes["a"].Name, + result: &Topology{ + hostname: nodes["a"].Name, + leader: true, + Location: nodes["a"].Location, + subnet: kiloNet, + privateIP: nodes["a"].InternalIP, + wireGuardCIDR: &net.IPNet{IP: w1, Mask: net.CIDRMask(16, 32)}, + Segments: []*segment{ + { + AllowedIPs: allowedIPs(nodes["a"].Subnet.String(), "192.168.0.1/32", "10.4.0.1/32"), + Endpoint: nodes["a"].ExternalIP.IP.String(), + Key: string(nodes["a"].Key), + Location: nodes["a"].Location, + cidrs: []*net.IPNet{nodes["a"].Subnet}, + hostnames: []string{"a"}, + privateIPs: []net.IP{nodes["a"].InternalIP.IP}, + wireGuardIP: w1, + }, + { + AllowedIPs: allowedIPs(nodes["b"].Subnet.String(), "192.168.0.1/32", nodes["c"].Subnet.String(), "192.168.0.2/32", "10.4.0.2/32"), + Endpoint: nodes["b"].ExternalIP.IP.String(), + Key: string(nodes["b"].Key), + Location: nodes["b"].Location, + cidrs: []*net.IPNet{nodes["b"].Subnet, nodes["c"].Subnet}, + hostnames: []string{"b", "c"}, + privateIPs: []net.IP{nodes["b"].InternalIP.IP, nodes["c"].InternalIP.IP}, + wireGuardIP: w2, + }, + }, + }, + }, + { + name: "datacenter from b", + granularity: DataCenterGranularity, + hostname: nodes["b"].Name, + result: &Topology{ + hostname: nodes["b"].Name, + leader: true, + Location: nodes["b"].Location, + subnet: kiloNet, + privateIP: nodes["b"].InternalIP, + wireGuardCIDR: &net.IPNet{IP: w2, Mask: net.CIDRMask(16, 32)}, + Segments: []*segment{ + { + AllowedIPs: allowedIPs(nodes["a"].Subnet.String(), "192.168.0.1/32", "10.4.0.1/32"), + Endpoint: nodes["a"].ExternalIP.IP.String(), + Key: string(nodes["a"].Key), + Location: nodes["a"].Location, + cidrs: []*net.IPNet{nodes["a"].Subnet}, + hostnames: []string{"a"}, + privateIPs: []net.IP{nodes["a"].InternalIP.IP}, + wireGuardIP: w1, + }, + { + AllowedIPs: allowedIPs(nodes["b"].Subnet.String(), "192.168.0.1/32", nodes["c"].Subnet.String(), "192.168.0.2/32", "10.4.0.2/32"), + Endpoint: nodes["b"].ExternalIP.IP.String(), + Key: string(nodes["b"].Key), + Location: nodes["b"].Location, + cidrs: []*net.IPNet{nodes["b"].Subnet, nodes["c"].Subnet}, + hostnames: []string{"b", "c"}, + privateIPs: []net.IP{nodes["b"].InternalIP.IP, nodes["c"].InternalIP.IP}, + wireGuardIP: w2, + }, + }, + }, + }, + { + name: "datacenter from c", + granularity: DataCenterGranularity, + hostname: nodes["c"].Name, + result: &Topology{ + hostname: nodes["c"].Name, + leader: false, + Location: nodes["b"].Location, + subnet: kiloNet, + privateIP: nodes["c"].InternalIP, + wireGuardCIDR: nil, + Segments: []*segment{ + { + AllowedIPs: allowedIPs(nodes["a"].Subnet.String(), "192.168.0.1/32", "10.4.0.1/32"), + Endpoint: nodes["a"].ExternalIP.IP.String(), + Key: string(nodes["a"].Key), + Location: nodes["a"].Location, + cidrs: []*net.IPNet{nodes["a"].Subnet}, + hostnames: []string{"a"}, + privateIPs: []net.IP{nodes["a"].InternalIP.IP}, + wireGuardIP: w1, + }, + { + AllowedIPs: allowedIPs(nodes["b"].Subnet.String(), "192.168.0.1/32", nodes["c"].Subnet.String(), "192.168.0.2/32", "10.4.0.2/32"), + 
Endpoint: nodes["b"].ExternalIP.IP.String(), + Key: string(nodes["b"].Key), + Location: nodes["b"].Location, + cidrs: []*net.IPNet{nodes["b"].Subnet, nodes["c"].Subnet}, + hostnames: []string{"b", "c"}, + privateIPs: []net.IP{nodes["b"].InternalIP.IP, nodes["c"].InternalIP.IP}, + wireGuardIP: w2, + }, + }, + }, + }, + { + name: "node from a", + granularity: NodeGranularity, + hostname: nodes["a"].Name, + result: &Topology{ + hostname: nodes["a"].Name, + leader: true, + Location: nodes["a"].Name, + subnet: kiloNet, + privateIP: nodes["a"].InternalIP, + wireGuardCIDR: &net.IPNet{IP: w1, Mask: net.CIDRMask(16, 32)}, + Segments: []*segment{ + { + AllowedIPs: allowedIPs(nodes["a"].Subnet.String(), "192.168.0.1/32", "10.4.0.1/32"), + Endpoint: nodes["a"].ExternalIP.IP.String(), + Key: string(nodes["a"].Key), + Location: nodes["a"].Name, + cidrs: []*net.IPNet{nodes["a"].Subnet}, + hostnames: []string{"a"}, + privateIPs: []net.IP{nodes["a"].InternalIP.IP}, + wireGuardIP: w1, + }, + { + AllowedIPs: allowedIPs(nodes["b"].Subnet.String(), "192.168.0.1/32", "10.4.0.2/32"), + Endpoint: nodes["b"].ExternalIP.IP.String(), + Key: string(nodes["b"].Key), + Location: nodes["b"].Name, + cidrs: []*net.IPNet{nodes["b"].Subnet}, + hostnames: []string{"b"}, + privateIPs: []net.IP{nodes["b"].InternalIP.IP}, + wireGuardIP: w2, + }, + { + AllowedIPs: allowedIPs(nodes["c"].Subnet.String(), "192.168.0.2/32", "10.4.0.3/32"), + Endpoint: nodes["c"].ExternalIP.IP.String(), + Key: string(nodes["c"].Key), + Location: nodes["c"].Name, + cidrs: []*net.IPNet{nodes["c"].Subnet}, + hostnames: []string{"c"}, + privateIPs: []net.IP{nodes["c"].InternalIP.IP}, + wireGuardIP: w3, + }, + }, + }, + }, + { + name: "node from b", + granularity: NodeGranularity, + hostname: nodes["b"].Name, + result: &Topology{ + hostname: nodes["b"].Name, + leader: true, + Location: nodes["b"].Name, + subnet: kiloNet, + privateIP: nodes["b"].InternalIP, + wireGuardCIDR: &net.IPNet{IP: w2, Mask: net.CIDRMask(16, 32)}, + Segments: []*segment{ + { + AllowedIPs: allowedIPs(nodes["a"].Subnet.String(), "192.168.0.1/32", "10.4.0.1/32"), + Endpoint: nodes["a"].ExternalIP.IP.String(), + Key: string(nodes["a"].Key), + Location: nodes["a"].Name, + cidrs: []*net.IPNet{nodes["a"].Subnet}, + hostnames: []string{"a"}, + privateIPs: []net.IP{nodes["a"].InternalIP.IP}, + wireGuardIP: w1, + }, + { + AllowedIPs: allowedIPs(nodes["b"].Subnet.String(), "192.168.0.1/32", "10.4.0.2/32"), + Endpoint: nodes["b"].ExternalIP.IP.String(), + Key: string(nodes["b"].Key), + Location: nodes["b"].Name, + cidrs: []*net.IPNet{nodes["b"].Subnet}, + hostnames: []string{"b"}, + privateIPs: []net.IP{nodes["b"].InternalIP.IP}, + wireGuardIP: w2, + }, + { + AllowedIPs: allowedIPs(nodes["c"].Subnet.String(), "192.168.0.2/32", "10.4.0.3/32"), + Endpoint: nodes["c"].ExternalIP.IP.String(), + Key: string(nodes["c"].Key), + Location: nodes["c"].Name, + cidrs: []*net.IPNet{nodes["c"].Subnet}, + hostnames: []string{"c"}, + privateIPs: []net.IP{nodes["c"].InternalIP.IP}, + wireGuardIP: w3, + }, + }, + }, + }, + { + name: "node from c", + granularity: NodeGranularity, + hostname: nodes["c"].Name, + result: &Topology{ + hostname: nodes["c"].Name, + leader: true, + Location: nodes["c"].Name, + subnet: kiloNet, + privateIP: nodes["c"].InternalIP, + wireGuardCIDR: &net.IPNet{IP: w3, Mask: net.CIDRMask(16, 32)}, + Segments: []*segment{ + { + AllowedIPs: allowedIPs(nodes["a"].Subnet.String(), "192.168.0.1/32", "10.4.0.1/32"), + Endpoint: nodes["a"].ExternalIP.IP.String(), + Key: string(nodes["a"].Key), + 
Location: nodes["a"].Name, + cidrs: []*net.IPNet{nodes["a"].Subnet}, + hostnames: []string{"a"}, + privateIPs: []net.IP{nodes["a"].InternalIP.IP}, + wireGuardIP: w1, + }, + { + AllowedIPs: allowedIPs(nodes["b"].Subnet.String(), "192.168.0.1/32", "10.4.0.2/32"), + Endpoint: nodes["b"].ExternalIP.IP.String(), + Key: string(nodes["b"].Key), + Location: nodes["b"].Name, + cidrs: []*net.IPNet{nodes["b"].Subnet}, + hostnames: []string{"b"}, + privateIPs: []net.IP{nodes["b"].InternalIP.IP}, + wireGuardIP: w2, + }, + { + AllowedIPs: allowedIPs(nodes["c"].Subnet.String(), "192.168.0.2/32", "10.4.0.3/32"), + Endpoint: nodes["c"].ExternalIP.IP.String(), + Key: string(nodes["c"].Key), + Location: nodes["c"].Name, + cidrs: []*net.IPNet{nodes["c"].Subnet}, + hostnames: []string{"c"}, + privateIPs: []net.IP{nodes["c"].InternalIP.IP}, + wireGuardIP: w3, + }, + }, + }, + }, + } { + tc.result.Key = string(key) + tc.result.Port = port + topo, err := NewTopology(nodes, tc.granularity, tc.hostname, port, key, kiloNet) + if err != nil { + t.Errorf("test case %q: failed to generate Topology: %v", tc.name, err) + } + if diff := pretty.Compare(topo, tc.result); diff != "" { + t.Errorf("test case %q: got diff: %v", tc.name, diff) + } + } +} + +func mustTopo(t *testing.T, nodes map[string]*Node, granularity Granularity, hostname string, port int, key []byte, subnet *net.IPNet) *Topology { + topo, err := NewTopology(nodes, granularity, hostname, port, key, subnet) + if err != nil { + t.Errorf("failed to generate Topology: %v", err) + } + return topo +} + +func TestRoutes(t *testing.T) { + nodes, key, port, kiloNet := setup(t) + kiloIface := 0 + privIface := 1 + pubIface := 2 + mustTopoForGranularityAndHost := func(granularity Granularity, hostname string) *Topology { + return mustTopo(t, nodes, granularity, hostname, port, key, kiloNet) + } + + for _, tc := range []struct { + name string + local bool + topology *Topology + result []*netlink.Route + }{ + { + name: "datacenter from a", + topology: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name), + result: []*netlink.Route{ + { + Dst: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].cidrs[0], + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["b"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].cidrs[1], + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["c"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + }, + }, + { + name: "datacenter from b", + topology: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["b"].Name), + result: []*netlink.Route{ + { + Dst: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["b"].Name).Segments[0].cidrs[0], + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(DataCenterGranularity, 
nodes["b"].Name).Segments[0].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["a"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["b"].Name).Segments[0].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + }, + }, + { + name: "datacenter from c", + topology: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["c"].Name), + result: []*netlink.Route{ + { + Dst: oneAddressCIDR(mustTopoForGranularityAndHost(DataCenterGranularity, nodes["c"].Name).Segments[0].wireGuardIP), + Flags: int(netlink.FLAG_ONLINK), + Gw: nodes["b"].InternalIP.IP, + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["c"].Name).Segments[0].cidrs[0], + Flags: int(netlink.FLAG_ONLINK), + Gw: nodes["b"].InternalIP.IP, + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["a"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: nodes["b"].InternalIP.IP, + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(mustTopoForGranularityAndHost(DataCenterGranularity, nodes["c"].Name).Segments[1].wireGuardIP), + Flags: int(netlink.FLAG_ONLINK), + Gw: nodes["b"].InternalIP.IP, + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, + }, + }, + { + name: "node from a", + topology: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name), + result: []*netlink.Route{ + { + Dst: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[1].cidrs[0], + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["b"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[2].cidrs[0], + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[2].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["c"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[2].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + }, + }, + { + name: "node from b", + topology: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name), + result: []*netlink.Route{ + { + Dst: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[0].cidrs[0], + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[0].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["a"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[0].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[2].cidrs[0], + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[2].wireGuardIP, + LinkIndex: kiloIface, + 
Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["c"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[2].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + }, + }, + { + name: "node from c", + topology: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name), + result: []*netlink.Route{ + { + Dst: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[0].cidrs[0], + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[0].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["a"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[0].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[1].cidrs[0], + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["b"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + }, + }, + { + name: "datacenter from a local", + local: true, + topology: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name), + result: []*netlink.Route{ + { + Dst: nodes["b"].Subnet, + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["b"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: nodes["c"].Subnet, + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["c"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["a"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + }, + }, + { + name: "datacenter from b local", + local: true, + topology: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["b"].Name), + result: []*netlink.Route{ + { + Dst: nodes["a"].Subnet, + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["b"].Name).Segments[0].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["a"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(DataCenterGranularity, nodes["b"].Name).Segments[0].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: nodes["c"].Subnet, + Flags: int(netlink.FLAG_ONLINK), + Gw: nodes["c"].InternalIP.IP, + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, + }, + }, + { + name: "datacenter from c local", + local: true, + topology: mustTopoForGranularityAndHost(DataCenterGranularity, 
nodes["c"].Name), + result: []*netlink.Route{ + { + Dst: oneAddressCIDR(mustTopoForGranularityAndHost(DataCenterGranularity, nodes["c"].Name).Segments[0].wireGuardIP), + Flags: int(netlink.FLAG_ONLINK), + Gw: nodes["b"].InternalIP.IP, + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: nodes["a"].Subnet, + Flags: int(netlink.FLAG_ONLINK), + Gw: nodes["b"].InternalIP.IP, + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["a"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: nodes["b"].InternalIP.IP, + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(mustTopoForGranularityAndHost(DataCenterGranularity, nodes["c"].Name).Segments[1].wireGuardIP), + Flags: int(netlink.FLAG_ONLINK), + Gw: nodes["b"].InternalIP.IP, + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: nodes["b"].Subnet, + Flags: int(netlink.FLAG_ONLINK), + Gw: nodes["b"].InternalIP.IP, + LinkIndex: privIface, + Protocol: unix.RTPROT_STATIC, + }, + }, + }, + { + name: "node from a local", + local: true, + topology: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name), + result: []*netlink.Route{ + { + Dst: nodes["b"].Subnet, + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["b"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: nodes["c"].Subnet, + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[2].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["c"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["a"].Name).Segments[2].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + }, + }, + { + name: "node from b local", + local: true, + topology: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name), + result: []*netlink.Route{ + { + Dst: nodes["a"].Subnet, + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[0].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["a"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[0].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: nodes["c"].Subnet, + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[2].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["c"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["b"].Name).Segments[2].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + }, + }, + { + name: "node from c local", + local: true, + topology: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name), + result: []*netlink.Route{ + { + Dst: nodes["a"].Subnet, + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[0].wireGuardIP, + 
LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["a"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[0].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: nodes["b"].Subnet, + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + { + Dst: oneAddressCIDR(nodes["b"].InternalIP.IP), + Flags: int(netlink.FLAG_ONLINK), + Gw: mustTopoForGranularityAndHost(NodeGranularity, nodes["c"].Name).Segments[1].wireGuardIP, + LinkIndex: kiloIface, + Protocol: unix.RTPROT_STATIC, + }, + }, + }, + } { + routes := tc.topology.Routes(kiloIface, privIface, pubIface, tc.local, NeverEncapsulate) + if diff := pretty.Compare(routes, tc.result); diff != "" { + t.Errorf("test case %q: got diff: %v", tc.name, diff) + } + } +} + +func TestConf(t *testing.T) { + nodes, key, port, kiloNet := setup(t) + for _, tc := range []struct { + name string + topology *Topology + result string + }{ + { + name: "datacenter from a", + topology: mustTopo(t, nodes, DataCenterGranularity, nodes["a"].Name, port, key, kiloNet), + result: `[Interface] +PrivateKey = private +ListenPort = 51820 + +[Peer] +PublicKey = key2 +Endpoint = 10.1.0.2:51820 +AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32 +`, + }, + { + name: "datacenter from b", + topology: mustTopo(t, nodes, DataCenterGranularity, nodes["b"].Name, port, key, kiloNet), + result: `[Interface] +PrivateKey = private +ListenPort = 51820 + +[Peer] +PublicKey = key1 +Endpoint = 10.1.0.1:51820 +AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32 +`, + }, + { + name: "datacenter from c", + topology: mustTopo(t, nodes, DataCenterGranularity, nodes["c"].Name, port, key, kiloNet), + result: `[Interface] +PrivateKey = private +ListenPort = 51820 + +[Peer] +PublicKey = key1 +Endpoint = 10.1.0.1:51820 +AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32 +`, + }, + { + name: "node from a", + topology: mustTopo(t, nodes, NodeGranularity, nodes["a"].Name, port, key, kiloNet), + result: `[Interface] +PrivateKey = private +ListenPort = 51820 + +[Peer] +PublicKey = key2 +Endpoint = 10.1.0.2:51820 +AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.4.0.2/32 + +[Peer] +PublicKey = key3 +Endpoint = 10.1.0.3:51820 +AllowedIPs = 10.2.3.0/24, 192.168.0.2/32, 10.4.0.3/32 +`, + }, + { + name: "node from b", + topology: mustTopo(t, nodes, NodeGranularity, nodes["b"].Name, port, key, kiloNet), + result: `[Interface] +PrivateKey = private +ListenPort = 51820 + +[Peer] +PublicKey = key1 +Endpoint = 10.1.0.1:51820 +AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32 + +[Peer] +PublicKey = key3 +Endpoint = 10.1.0.3:51820 +AllowedIPs = 10.2.3.0/24, 192.168.0.2/32, 10.4.0.3/32 +`, + }, + { + name: "node from c", + topology: mustTopo(t, nodes, NodeGranularity, nodes["c"].Name, port, key, kiloNet), + result: `[Interface] +PrivateKey = private +ListenPort = 51820 + +[Peer] +PublicKey = key1 +Endpoint = 10.1.0.1:51820 +AllowedIPs = 10.2.1.0/24, 192.168.0.1/32, 10.4.0.1/32 + +[Peer] +PublicKey = key2 +Endpoint = 10.1.0.2:51820 +AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.4.0.2/32 +`, + }, + } { + conf, err := tc.topology.Conf() + if err != nil { + t.Errorf("test case %q: failed to generate conf: %v", tc.name, err) + } + if string(conf) != tc.result { + t.Errorf("test case %q: expected %s got %s", 
tc.name, tc.result, string(conf)) + } + } +} + +func TestFindLeader(t *testing.T) { + ip, e1, err := net.ParseCIDR("10.0.0.1/32") + if err != nil { + t.Fatalf("failed to parse external IP CIDR: %v", err) + } + e1.IP = ip + ip, e2, err := net.ParseCIDR("8.8.8.8/32") + if err != nil { + t.Fatalf("failed to parse external IP CIDR: %v", err) + } + e2.IP = ip + + nodes := []*Node{ + { + Name: "a", + ExternalIP: e1, + }, + { + Name: "b", + ExternalIP: e2, + }, + { + Name: "c", + ExternalIP: e2, + }, + { + Name: "d", + ExternalIP: e1, + Leader: true, + }, + { + Name: "2", + ExternalIP: e2, + Leader: true, + }, + } + for _, tc := range []struct { + name string + nodes []*Node + out int + }{ + { + name: "nil", + nodes: nil, + out: 0, + }, + { + name: "one", + nodes: []*Node{nodes[0]}, + out: 0, + }, + { + name: "non-leaders", + nodes: []*Node{nodes[0], nodes[1], nodes[2]}, + out: 1, + }, + { + name: "leaders", + nodes: []*Node{nodes[3], nodes[4]}, + out: 1, + }, + { + name: "public", + nodes: []*Node{nodes[1], nodes[2], nodes[4]}, + out: 2, + }, + { + name: "private", + nodes: []*Node{nodes[0], nodes[3]}, + out: 1, + }, + { + name: "all", + nodes: nodes, + out: 4, + }, + } { + l := findLeader(tc.nodes) + if l != tc.out { + t.Errorf("test case %q: expected %d got %d", tc.name, tc.out, l) + } + } +} diff --git a/pkg/route/route.go b/pkg/route/route.go new file mode 100644 index 0000000..7548328 --- /dev/null +++ b/pkg/route/route.go @@ -0,0 +1,173 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package route + +import ( + "errors" + "fmt" + "sync" + + "github.com/vishvananda/netlink" + "golang.org/x/sys/unix" +) + +// Table represents a routing table. +// Table can safely be used concurrently. +type Table struct { + errors chan error + mu sync.Mutex + routes map[string]*netlink.Route + subscribed bool + + // Make these functions fields to allow + // for testing. + add func(*netlink.Route) error + del func(*netlink.Route) error +} + +// NewTable generates a new table. +func NewTable() *Table { + return &Table{ + errors: make(chan error), + routes: make(map[string]*netlink.Route), + add: netlink.RouteReplace, + del: func(r *netlink.Route) error { + name := routeToString(r) + if name == "" { + return errors.New("attempting to delete invalid route") + } + routes, err := netlink.RouteList(nil, netlink.FAMILY_ALL) + if err != nil { + return fmt.Errorf("failed to list routes before deletion: %v", err) + } + for _, route := range routes { + if routeToString(&route) == name { + return netlink.RouteDel(r) + } + } + return nil + }, + } +} + +// Run watches for changes to routes in the table and reconciles +// the table against the desired state. +func (t *Table) Run(stop <-chan struct{}) (<-chan error, error) { + t.mu.Lock() + if t.subscribed { + t.mu.Unlock() + return t.errors, nil + } + // Ensure a given instance only subscribes once. 
+ t.subscribed = true + t.mu.Unlock() + events := make(chan netlink.RouteUpdate) + if err := netlink.RouteSubscribe(events, stop); err != nil { + return t.errors, fmt.Errorf("failed to subscribe to route events: %v", err) + } + go func() { + defer close(t.errors) + for { + var e netlink.RouteUpdate + select { + case e = <-events: + case <-stop: + return + } + switch e.Type { + // Watch for deleted routes to reconcile this table's routes. + case unix.RTM_DELROUTE: + t.mu.Lock() + for _, r := range t.routes { + // If any deleted route's destination matches a destination + // in the table, reset the corresponding route just in case. + if r.Dst.IP.Equal(e.Route.Dst.IP) && r.Dst.Mask.String() == e.Route.Dst.Mask.String() { + if err := t.add(r); err != nil { + nonBlockingSend(t.errors, fmt.Errorf("failed add route: %v", err)) + } + } + } + t.mu.Unlock() + } + } + }() + return t.errors, nil +} + +// CleanUp will clean up any routes created by the instance. +func (t *Table) CleanUp() error { + t.mu.Lock() + defer t.mu.Unlock() + for k, route := range t.routes { + if err := t.del(route); err != nil { + return fmt.Errorf("failed to delete route: %v", err) + } + delete(t.routes, k) + } + return nil +} + +// Set idempotently overwrites any routes previously defined +// for the table with the given set of routes. +func (t *Table) Set(routes []*netlink.Route) error { + r := make(map[string]*netlink.Route) + for _, route := range routes { + if route == nil { + continue + } + r[routeToString(route)] = route + } + t.mu.Lock() + defer t.mu.Unlock() + for k := range t.routes { + if _, ok := r[k]; !ok { + if err := t.del(t.routes[k]); err != nil { + return fmt.Errorf("failed to delete route: %v", err) + } + delete(t.routes, k) + } + } + for k := range r { + if _, ok := t.routes[k]; !ok { + if err := t.add(r[k]); err != nil { + return fmt.Errorf("failed to add route %q: %v", routeToString(r[k]), err) + } + t.routes[k] = r[k] + } + } + return nil +} + +func nonBlockingSend(errors chan<- error, err error) { + select { + case errors <- err: + default: + } +} + +func routeToString(route *netlink.Route) string { + if route == nil || route.Dst == nil { + return "" + } + src := "-" + if route.Src != nil { + src = route.Src.String() + } + gw := "-" + if route.Gw != nil { + gw = route.Gw.String() + } + return fmt.Sprintf("dst: %s, via: %s, src: %s, dev: %d", route.Dst.String(), gw, src, route.LinkIndex) +} diff --git a/pkg/route/route_test.go b/pkg/route/route_test.go new file mode 100644 index 0000000..1e940b0 --- /dev/null +++ b/pkg/route/route_test.go @@ -0,0 +1,262 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package route + +import ( + "errors" + "net" + "testing" + + "github.com/vishvananda/netlink" +) + +func TestSet(t *testing.T) { + _, c1, err := net.ParseCIDR("10.2.0.0/24") + if err != nil { + t.Fatalf("failed to parse CIDR: %v", err) + } + _, c2, err := net.ParseCIDR("10.1.0.0/24") + if err != nil { + t.Fatalf("failed to parse CIDR: %v", err) + } + add := func(backend map[string]*netlink.Route) func(*netlink.Route) error { + return func(r *netlink.Route) error { + backend[routeToString(r)] = r + return nil + } + } + del := func(backend map[string]*netlink.Route) func(*netlink.Route) error { + return func(r *netlink.Route) error { + delete(backend, routeToString(r)) + return nil + } + } + adderr := func(backend map[string]*netlink.Route) func(*netlink.Route) error { + return func(r *netlink.Route) error { + return errors.New(routeToString(r)) + } + } + for _, tc := range []struct { + name string + routes []*netlink.Route + err bool + add func(map[string]*netlink.Route) func(*netlink.Route) error + del func(map[string]*netlink.Route) func(*netlink.Route) error + }{ + { + name: "empty", + routes: nil, + err: false, + add: add, + del: del, + }, + { + name: "single", + routes: []*netlink.Route{ + { + Dst: c1, + Gw: net.ParseIP("10.1.0.1"), + }, + }, + err: false, + add: add, + del: del, + }, + { + name: "multiple", + routes: []*netlink.Route{ + { + Dst: c1, + Gw: net.ParseIP("10.1.0.1"), + }, + { + Dst: c2, + Gw: net.ParseIP("127.0.0.1"), + }, + }, + err: false, + add: add, + del: del, + }, + { + name: "err empty", + routes: nil, + err: false, + add: adderr, + del: del, + }, + { + name: "err", + routes: []*netlink.Route{ + { + Dst: c1, + Gw: net.ParseIP("10.1.0.1"), + }, + { + Dst: c2, + Gw: net.ParseIP("127.0.0.1"), + }, + }, + err: true, + add: adderr, + del: del, + }, + } { + backend := make(map[string]*netlink.Route) + a := tc.add(backend) + d := tc.del(backend) + table := NewTable() + table.add = a + table.del = d + if err := table.Set(tc.routes); (err != nil) != tc.err { + no := "no" + if tc.err { + no = "an" + } + t.Errorf("test case %q: got unexpected result: expected %s error, got %v", tc.name, no, err) + } + // If no error was expected, then compare the backend to the input. 
+ if !tc.err { + for _, r := range tc.routes { + r1 := backend[routeToString(r)] + r2 := table.routes[routeToString(r)] + if r != r1 || r != r2 { + t.Errorf("test case %q: expected all routes to be equal: expected %v, got %v and %v", tc.name, r, r1, r2) + } + } + } + } +} + +func TestCleanUp(t *testing.T) { + _, c1, err := net.ParseCIDR("10.2.0.0/24") + if err != nil { + t.Fatalf("failed to parse CIDR: %v", err) + } + _, c2, err := net.ParseCIDR("10.1.0.0/24") + if err != nil { + t.Fatalf("failed to parse CIDR: %v", err) + } + add := func(backend map[string]*netlink.Route) func(*netlink.Route) error { + return func(r *netlink.Route) error { + backend[routeToString(r)] = r + return nil + } + } + del := func(backend map[string]*netlink.Route) func(*netlink.Route) error { + return func(r *netlink.Route) error { + delete(backend, routeToString(r)) + return nil + } + } + delerr := func(backend map[string]*netlink.Route) func(*netlink.Route) error { + return func(r *netlink.Route) error { + return errors.New(routeToString(r)) + } + } + for _, tc := range []struct { + name string + routes []*netlink.Route + err bool + add func(map[string]*netlink.Route) func(*netlink.Route) error + del func(map[string]*netlink.Route) func(*netlink.Route) error + }{ + { + name: "empty", + routes: nil, + err: false, + add: add, + del: del, + }, + { + name: "single", + routes: []*netlink.Route{ + { + Dst: c1, + Gw: net.ParseIP("10.1.0.1"), + }, + }, + err: false, + add: add, + del: del, + }, + { + name: "multiple", + routes: []*netlink.Route{ + { + Dst: c1, + Gw: net.ParseIP("10.1.0.1"), + }, + { + Dst: c2, + Gw: net.ParseIP("127.0.0.1"), + }, + }, + err: false, + add: add, + del: del, + }, + { + name: "err empty", + routes: nil, + err: false, + add: add, + del: delerr, + }, + { + name: "err", + routes: []*netlink.Route{ + { + Dst: c1, + Gw: net.ParseIP("10.1.0.1"), + }, + { + Dst: c2, + Gw: net.ParseIP("127.0.0.1"), + }, + }, + err: true, + add: add, + del: delerr, + }, + } { + backend := make(map[string]*netlink.Route) + a := tc.add(backend) + d := tc.del(backend) + table := NewTable() + table.add = a + table.del = d + if err := table.Set(tc.routes); err != nil { + t.Fatalf("test case %q: Set should not fail: %v", tc.name, err) + } + if err := table.CleanUp(); (err != nil) != tc.err { + no := "no" + if tc.err { + no = "an" + } + t.Errorf("test case %q: got unexpected result: expected %s error, got %v", tc.name, no, err) + } + // If no error was expected, then compare the backend to the input. + if !tc.err { + for _, r := range tc.routes { + r1 := backend[routeToString(r)] + r2 := table.routes[routeToString(r)] + if r1 != nil || r2 != nil { + t.Errorf("test case %q: expected all routes to be nil: expected got %v and %v", tc.name, r1, r2) + } + } + } + } +} diff --git a/pkg/version/version.go b/pkg/version/version.go new file mode 100644 index 0000000..f1add8e --- /dev/null +++ b/pkg/version/version.go @@ -0,0 +1,18 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package version + +// Version is the version of Kilo. +var Version = "was not built properly" diff --git a/pkg/wireguard/wireguard.go b/pkg/wireguard/wireguard.go new file mode 100644 index 0000000..ce54856 --- /dev/null +++ b/pkg/wireguard/wireguard.go @@ -0,0 +1,183 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wireguard + +import ( + "bytes" + "fmt" + "os/exec" + "regexp" + "sort" + "strconv" + + "github.com/vishvananda/netlink" + "gopkg.in/ini.v1" +) + +type wgLink struct { + a netlink.LinkAttrs + t string +} + +func (w wgLink) Attrs() *netlink.LinkAttrs { + return &w.a +} + +func (w wgLink) Type() string { + return w.t +} + +// New creates a new WireGuard interface. +func New(prefix string) (int, error) { + links, err := netlink.LinkList() + if err != nil { + return 0, fmt.Errorf("failed to list links: %v", err) + } + max := 0 + re := regexp.MustCompile(fmt.Sprintf("^%s([0-9]+)$", prefix)) + for _, link := range links { + if matches := re.FindStringSubmatch(link.Attrs().Name); len(matches) == 2 { + i, err := strconv.Atoi(matches[1]) + if err != nil { + // This should never happen. + return 0, fmt.Errorf("failed to parse digits as an integer: %v", err) + } + if i >= max { + max = i + 1 + } + } + } + name := fmt.Sprintf("%s%d", prefix, max) + wl := wgLink{a: netlink.NewLinkAttrs(), t: "wireguard"} + wl.a.Name = name + if err := netlink.LinkAdd(wl); err != nil { + return 0, fmt.Errorf("failed to create interface %s: %v", name, err) + } + link, err := netlink.LinkByName(name) + if err != nil { + return 0, fmt.Errorf("failed to get interface index: %v", err) + } + return link.Attrs().Index, nil +} + +// Keys generates a WireGuard private and public key-pair. +func Keys() ([]byte, []byte, error) { + private, err := GenKey() + if err != nil { + return nil, nil, fmt.Errorf("failed to generate private key: %v", err) + } + public, err := PubKey(private) + return private, public, err +} + +// GenKey generates a WireGuard private key. +func GenKey() ([]byte, error) { + return exec.Command("wg", "genkey").Output() +} + +// PubKey generates a WireGuard public key for a given private key. +func PubKey(key []byte) ([]byte, error) { + cmd := exec.Command("wg", "pubkey") + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, fmt.Errorf("failed to open pipe to stdin: %v", err) + } + + go func() { + defer stdin.Close() + stdin.Write(key) + }() + + public, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("failed to generate public key: %v", err) + } + return public, nil +} + +// SetConf applies a WireGuard configuration file to the given interface. +func SetConf(iface string, path string) error { + cmd := exec.Command("wg", "setconf", iface, path) + var stderr bytes.Buffer + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to apply the WireGuard configuration: %s", stderr.String()) + } + return nil +} + +// ShowConf gets the WireGuard configuration for the given interface. 
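+// It shells out to `wg showconf` and returns the raw configuration bytes for the interface.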
+func ShowConf(iface string) ([]byte, error) { + cmd := exec.Command("wg", "showconf", iface) + var stderr, stdout bytes.Buffer + cmd.Stderr = &stderr + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("failed to read the WireGuard configuration: %s", stderr.String()) + } + return stdout.Bytes(), nil +} + +// CompareConf compares two WireGuard configurations. +// It returns true if they are equal, false if they are not, +// and any error that was encountered. +// Note: CompareConf only goes one level deep, as WireGuard +// configurations are not nested further than that. +func CompareConf(a, b []byte) (bool, error) { + iniA, err := ini.Load(a) + if err != nil { + return false, fmt.Errorf("failed to parse configuration: %v", err) + } + iniB, err := ini.Load(b) + if err != nil { + return false, fmt.Errorf("failed to parse configuration: %v", err) + } + secsA, secsB := iniA.SectionStrings(), iniB.SectionStrings() + if len(secsA) != len(secsB) { + return false, nil + } + sort.Strings(secsA) + sort.Strings(secsB) + var keysA, keysB []string + var valsA, valsB []string + for i := range secsA { + if secsA[i] != secsB[i] { + return false, nil + } + keysA, keysB = iniA.Section(secsA[i]).KeyStrings(), iniB.Section(secsB[i]).KeyStrings() + if len(keysA) != len(keysB) { + return false, nil + } + sort.Strings(keysA) + sort.Strings(keysB) + for j := range keysA { + if keysA[j] != keysB[j] { + return false, nil + } + valsA, valsB = iniA.Section(secsA[i]).Key(keysA[j]).Strings(","), iniB.Section(secsB[i]).Key(keysB[j]).Strings(",") + if len(valsA) != len(valsB) { + return false, nil + } + sort.Strings(valsA) + sort.Strings(valsB) + for k := range valsA { + if valsA[k] != valsB[k] { + return false, nil + } + } + } + } + return true, nil +} diff --git a/pkg/wireguard/wireguard_test.go b/pkg/wireguard/wireguard_test.go new file mode 100644 index 0000000..f95aa59 --- /dev/null +++ b/pkg/wireguard/wireguard_test.go @@ -0,0 +1,143 @@ +// Copyright 2019 the Kilo authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package wireguard + +import ( + "testing" +) + +func TestCompareConf(t *testing.T) { + for _, tc := range []struct { + name string + a []byte + b []byte + out bool + }{ + { + name: "empty", + a: []byte{}, + b: []byte{}, + out: true, + }, + { + name: "key and value order", + a: []byte(`[Interface] +PrivateKey = private +ListenPort = 51820 + +[Peer] +Endpoint = 10.1.0.2:51820 +PublicKey = key +AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32 +`), + b: []byte(`[Interface] +ListenPort = 51820 +PrivateKey = private + +[Peer] +PublicKey = key +AllowedIPs = 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32, 10.2.2.0/24 +Endpoint = 10.1.0.2:51820 +`), + out: true, + }, + { + name: "whitespace", + a: []byte(`[Interface] +PrivateKey = private +ListenPort = 51820 + +[Peer] +Endpoint = 10.1.0.2:51820 +PublicKey = key +AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32 +`), + b: []byte(`[Interface] +PrivateKey=private +ListenPort=51820 +[Peer] +Endpoint=10.1.0.2:51820 +PublicKey=key +AllowedIPs=10.2.2.0/24,192.168.0.1/32,10.2.3.0/24,192.168.0.2/32,10.4.0.2/32 +`), + out: true, + }, + { + name: "missing key", + a: []byte(`[Interface] +PrivateKey = private +ListenPort = 51820 + +[Peer] +Endpoint = 10.1.0.2:51820 +PublicKey = key +AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32 +`), + b: []byte(`[Interface] +PrivateKey = private +ListenPort = 51820 + +[Peer] +PublicKey = key +AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32 +`), + out: false, + }, + { + name: "section order", + a: []byte(`[Interface] +PrivateKey = private +ListenPort = 51820 + +[Peer] +Endpoint = 10.1.0.2:51820 +PublicKey = key +AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32 +`), + b: []byte(`[Peer] +Endpoint = 10.1.0.2:51820 +PublicKey = key +AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32 + +[Interface] +PrivateKey = private +ListenPort = 51820 +`), + out: true, + }, + { + name: "one empty", + a: []byte(`[Interface] +PrivateKey = private +ListenPort = 51820 + +[Peer] +Endpoint = 10.1.0.2:51820 +PublicKey = key +AllowedIPs = 10.2.2.0/24, 192.168.0.1/32, 10.2.3.0/24, 192.168.0.2/32, 10.4.0.2/32 +`), + b: []byte(``), + out: false, + }, + } { + equal, err := CompareConf(tc.a, tc.b) + if err != nil { + t.Errorf("test case %q: got unexpected error: %v", tc.name, err) + } + if equal != tc.out { + t.Errorf("test case %q: expected %t, got %t", tc.name, tc.out, equal) + } + } +} diff --git a/vendor/github.com/awalterschulze/gographviz/.travis.yml b/vendor/github.com/awalterschulze/gographviz/.travis.yml new file mode 100644 index 0000000..fe59810 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/.travis.yml @@ -0,0 +1,10 @@ +before_install: + - ./install-godeps.sh + +script: + - make travis + +language: go + +go: + - 1.8 diff --git a/vendor/github.com/awalterschulze/gographviz/AUTHORS b/vendor/github.com/awalterschulze/gographviz/AUTHORS new file mode 100644 index 0000000..fa0713a --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of GoGraphviz authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS file, which +# lists people. For example, employees are listed in CONTRIBUTORS, +# but not in AUTHORS, because the employer holds the copyright. 
+ +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name + +# Please keep the list sorted. + +Vastech SA (PTY) LTD +Xavier Chassin +Walter Schulze diff --git a/vendor/github.com/awalterschulze/gographviz/CONTRIBUTORS b/vendor/github.com/awalterschulze/gographviz/CONTRIBUTORS new file mode 100644 index 0000000..3bbeed3 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/CONTRIBUTORS @@ -0,0 +1,5 @@ +Robin Eklind +Walter Schulze +Xuanyi Chew +Nathan Kitchen +Ruud Kamphuis diff --git a/vendor/github.com/awalterschulze/gographviz/LICENSE b/vendor/github.com/awalterschulze/gographviz/LICENSE new file mode 100644 index 0000000..6259ffd --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/LICENSE @@ -0,0 +1,46 @@ +Copyright 2013 GoGraphviz Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------------------------------------------------------------------- +Portions of gocc's source code has been derived from Go, and are covered by the +following license: +------------------------------------------------------------------------------- + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/awalterschulze/gographviz/Makefile b/vendor/github.com/awalterschulze/gographviz/Makefile new file mode 100644 index 0000000..0e1bdb4 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/Makefile @@ -0,0 +1,16 @@ +regenerate: + go install github.com/goccmack/gocc + gocc -zip -o ./internal/ dot.bnf + find . 
-type f -name '*.go' | xargs goimports -w + +test: + go test ./... + +travis: + make regenerate + go build ./... + go test ./... + errcheck -ignore 'fmt:[FS]?[Pp]rint*' ./... + gofmt -l -s -w . + golint -set_exit_status + git diff --exit-code diff --git a/vendor/github.com/awalterschulze/gographviz/Readme.md b/vendor/github.com/awalterschulze/gographviz/Readme.md new file mode 100644 index 0000000..3d7c4fb --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/Readme.md @@ -0,0 +1,39 @@ +Parses the Graphviz DOT language and creates an interface, in golang, with which to easily create new and manipulate existing graphs which can be written back to the DOT format. + +This parser has been created using [gocc](http://code.google.com/p/gocc). + +### Example (Parse and Edit) ### + +``` +graphAst, _ := gographviz.ParseString(`digraph G {}`) +graph := gographviz.NewGraph() +if err := gographviz.Analyse(graphAst, graph); err != nil { + panic(err) +} +graph.AddNode("G", "a", nil) +graph.AddNode("G", "b", nil) +graph.AddEdge("a", "b", true, nil) +output := graph.String() +``` + +### Documentation ### + +The [godoc](https://godoc.org/github.com/awalterschulze/gographviz) includes some more examples. + +### Installation ### +go get github.com/awalterschulze/gographviz + +### Tests ### + +[![Build Status](https://travis-ci.org/awalterschulze/gographviz.svg?branch=master)](https://travis-ci.org/awalterschulze/gographviz) + +### Users ### + + - [aptly](https://github.com/smira/aptly) - Debian repository management tool + - [gorgonia](https://github.com/chewxy/gorgonia) - A Library that helps facilitate machine learning in Go + - [imagemonkey](https://imagemonkey.io/graph?editor=true) - Let's create our own image dataset + - [depviz](https://github.com/moul/depviz) - GitHub dependency visualizer (auto-roadmap) + +### Mentions ### + +[Using Golang and GraphViz to Visualize Complex Grails Applications](http://ilikeorangutans.github.io/2014/05/03/using-golang-and-graphviz-to-visualize-complex-grails-applications/) diff --git a/vendor/github.com/awalterschulze/gographviz/analyse.go b/vendor/github.com/awalterschulze/gographviz/analyse.go new file mode 100644 index 0000000..fd9e25a --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/analyse.go @@ -0,0 +1,188 @@ +//Copyright 2013 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +package gographviz + +import ( + "github.com/awalterschulze/gographviz/ast" +) + +// NewAnalysedGraph creates a Graph structure by analysing an Abstract Syntax Tree representing a parsed graph. +func NewAnalysedGraph(graph *ast.Graph) (*Graph, error) { + g := NewGraph() + if err := Analyse(graph, g); err != nil { + return nil, err + } + return g, nil +} + +// Analyse analyses an Abstract Syntax Tree representing a parsed graph into a newly created graph structure Interface. 
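+// Errors encountered while walking the tree are collected and returned together as a single error.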
+func Analyse(graph *ast.Graph, g Interface) error { + gerr := newErrCatcher(g) + graph.Walk(&graphVisitor{gerr}) + return gerr.getError() +} + +type nilVisitor struct { +} + +func (w *nilVisitor) Visit(v ast.Elem) ast.Visitor { + return w +} + +type graphVisitor struct { + g errInterface +} + +func (w *graphVisitor) Visit(v ast.Elem) ast.Visitor { + graph, ok := v.(*ast.Graph) + if !ok { + return w + } + w.g.SetStrict(graph.Strict) + w.g.SetDir(graph.Type == ast.DIGRAPH) + graphName := graph.ID.String() + w.g.SetName(graphName) + return newStmtVisitor(w.g, graphName, nil, nil) +} + +func newStmtVisitor(g errInterface, graphName string, nodeAttrs, edgeAttrs map[string]string) *stmtVisitor { + nodeAttrs = ammend(make(map[string]string), nodeAttrs) + edgeAttrs = ammend(make(map[string]string), edgeAttrs) + return &stmtVisitor{g, graphName, nodeAttrs, edgeAttrs, make(map[string]string), make(map[string]struct{})} +} + +type stmtVisitor struct { + g errInterface + graphName string + currentNodeAttrs map[string]string + currentEdgeAttrs map[string]string + currentGraphAttrs map[string]string + createdNodes map[string]struct{} +} + +func (w *stmtVisitor) Visit(v ast.Elem) ast.Visitor { + switch s := v.(type) { + case ast.NodeStmt: + return w.nodeStmt(s) + case ast.EdgeStmt: + return w.edgeStmt(s) + case ast.NodeAttrs: + return w.nodeAttrs(s) + case ast.EdgeAttrs: + return w.edgeAttrs(s) + case ast.GraphAttrs: + return w.graphAttrs(s) + case *ast.SubGraph: + return w.subGraph(s) + case *ast.Attr: + return w.attr(s) + case ast.AttrList: + return &nilVisitor{} + default: + //fmt.Fprintf(os.Stderr, "unknown stmt %T\n", v) + } + return w +} + +func ammend(attrs map[string]string, add map[string]string) map[string]string { + for key, value := range add { + if _, ok := attrs[key]; !ok { + attrs[key] = value + } + } + return attrs +} + +func overwrite(attrs map[string]string, overwrite map[string]string) map[string]string { + for key, value := range overwrite { + attrs[key] = value + } + return attrs +} + +func (w *stmtVisitor) addNodeFromEdge(nodeID string) { + if _, ok := w.createdNodes[nodeID]; !ok { + w.createdNodes[nodeID] = struct{}{} + w.g.AddNode(w.graphName, nodeID, w.currentNodeAttrs) + } +} + +func (w *stmtVisitor) nodeStmt(stmt ast.NodeStmt) ast.Visitor { + nodeID := stmt.NodeID.String() + var defaultAttrs map[string]string + if _, ok := w.createdNodes[nodeID]; !ok { + defaultAttrs = w.currentNodeAttrs + w.createdNodes[nodeID] = struct{}{} + } + // else the defaults were already inherited + attrs := ammend(stmt.Attrs.GetMap(), defaultAttrs) + w.g.AddNode(w.graphName, nodeID, attrs) + return &nilVisitor{} +} + +func (w *stmtVisitor) edgeStmt(stmt ast.EdgeStmt) ast.Visitor { + attrs := stmt.Attrs.GetMap() + attrs = ammend(attrs, w.currentEdgeAttrs) + src := stmt.Source.GetID() + srcName := src.String() + if stmt.Source.IsNode() { + w.addNodeFromEdge(srcName) + } + srcPort := stmt.Source.GetPort() + for i := range stmt.EdgeRHS { + directed := bool(stmt.EdgeRHS[i].Op) + dst := stmt.EdgeRHS[i].Destination.GetID() + dstName := dst.String() + if stmt.EdgeRHS[i].Destination.IsNode() { + w.addNodeFromEdge(dstName) + } + dstPort := stmt.EdgeRHS[i].Destination.GetPort() + w.g.AddPortEdge(srcName, srcPort.String(), dstName, dstPort.String(), directed, attrs) + src = dst + srcPort = dstPort + srcName = dstName + } + return w +} + +func (w *stmtVisitor) nodeAttrs(stmt ast.NodeAttrs) ast.Visitor { + w.currentNodeAttrs = overwrite(w.currentNodeAttrs, ast.AttrList(stmt).GetMap()) + return &nilVisitor{} +} + 
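+// edgeAttrs records default attributes that will be applied to edges declared later in the current graph or subgraph.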
+func (w *stmtVisitor) edgeAttrs(stmt ast.EdgeAttrs) ast.Visitor { + w.currentEdgeAttrs = overwrite(w.currentEdgeAttrs, ast.AttrList(stmt).GetMap()) + return &nilVisitor{} +} + +func (w *stmtVisitor) graphAttrs(stmt ast.GraphAttrs) ast.Visitor { + attrs := ast.AttrList(stmt).GetMap() + for key, value := range attrs { + w.g.AddAttr(w.graphName, key, value) + } + w.currentGraphAttrs = overwrite(w.currentGraphAttrs, attrs) + return &nilVisitor{} +} + +func (w *stmtVisitor) subGraph(stmt *ast.SubGraph) ast.Visitor { + subGraphName := stmt.ID.String() + w.g.AddSubGraph(w.graphName, subGraphName, w.currentGraphAttrs) + return newStmtVisitor(w.g, subGraphName, w.currentNodeAttrs, w.currentEdgeAttrs) +} + +func (w *stmtVisitor) attr(stmt *ast.Attr) ast.Visitor { + w.g.AddAttr(w.graphName, stmt.Field.String(), stmt.Value.String()) + return w +} diff --git a/vendor/github.com/awalterschulze/gographviz/ast/ast.go b/vendor/github.com/awalterschulze/gographviz/ast/ast.go new file mode 100644 index 0000000..d2ecfdd --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/ast/ast.go @@ -0,0 +1,684 @@ +//Copyright 2013 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +//Abstract Syntax Tree representing the DOT grammar +package ast + +import ( + "errors" + "fmt" + "math/rand" + "sort" + "strings" + + "github.com/awalterschulze/gographviz/internal/token" +) + +var ( + r = rand.New(rand.NewSource(1234)) +) + +type Visitor interface { + Visit(e Elem) Visitor +} + +type Elem interface { + String() string +} + +type Walkable interface { + Walk(v Visitor) +} + +type Attrib interface{} + +type Bool bool + +const ( + FALSE = Bool(false) + TRUE = Bool(true) +) + +func (this Bool) String() string { + if this { + return "true" + } + return "false" +} + +func (this Bool) Walk(v Visitor) { + if v == nil { + return + } + v.Visit(this) +} + +type GraphType bool + +const ( + GRAPH = GraphType(false) + DIGRAPH = GraphType(true) +) + +func (this GraphType) String() string { + if this { + return "digraph" + } + return "graph" +} + +func (this GraphType) Walk(v Visitor) { + if v == nil { + return + } + v.Visit(this) +} + +type Graph struct { + Type GraphType + Strict bool + ID ID + StmtList StmtList +} + +func NewGraph(t, strict, id, l Attrib) (*Graph, error) { + g := &Graph{Type: t.(GraphType), Strict: bool(strict.(Bool)), ID: ID("")} + if id != nil { + g.ID = id.(ID) + } + if l != nil { + g.StmtList = l.(StmtList) + } + return g, nil +} + +func (this *Graph) String() string { + var s string + if this.Strict { + s += "strict " + } + s += this.Type.String() + " " + this.ID.String() + " {\n" + if this.StmtList != nil { + s += this.StmtList.String() + } + s += "\n}\n" + return s +} + +func (this *Graph) Walk(v Visitor) { + if v == nil { + return + } + v = v.Visit(this) + this.Type.Walk(v) + this.ID.Walk(v) + this.StmtList.Walk(v) +} + +type StmtList []Stmt + +func NewStmtList(s Attrib) (StmtList, error) { + ss := make(StmtList, 1) + ss[0] = s.(Stmt) + return ss, nil +} + +func AppendStmtList(ss, s 
Attrib) (StmtList, error) { + this := ss.(StmtList) + this = append(this, s.(Stmt)) + return this, nil +} + +func (this StmtList) String() string { + if len(this) == 0 { + return "" + } + s := "" + for i := 0; i < len(this); i++ { + ss := this[i].String() + if len(ss) > 0 { + s += "\t" + ss + ";\n" + } + } + return s +} + +func (this StmtList) Walk(v Visitor) { + if v == nil { + return + } + v = v.Visit(this) + for i := range this { + this[i].Walk(v) + } +} + +type Stmt interface { + Elem + Walkable + isStmt() +} + +func (this NodeStmt) isStmt() {} +func (this EdgeStmt) isStmt() {} +func (this EdgeAttrs) isStmt() {} +func (this NodeAttrs) isStmt() {} +func (this GraphAttrs) isStmt() {} +func (this *SubGraph) isStmt() {} +func (this *Attr) isStmt() {} + +type SubGraph struct { + ID ID + StmtList StmtList +} + +func NewSubGraph(id, l Attrib) (*SubGraph, error) { + g := &SubGraph{ID: ID(fmt.Sprintf("anon%d", r.Int63()))} + if id != nil { + if len(id.(ID)) > 0 { + g.ID = id.(ID) + } + } + if l != nil { + g.StmtList = l.(StmtList) + } + return g, nil +} + +func (this *SubGraph) GetID() ID { + return this.ID +} + +func (this *SubGraph) GetPort() Port { + return NewPort(nil, nil) +} + +func (this *SubGraph) String() string { + gName := this.ID.String() + if strings.HasPrefix(gName, "anon") { + gName = "" + } + s := "subgraph " + this.ID.String() + " {\n" + if this.StmtList != nil { + s += this.StmtList.String() + } + s += "\n}\n" + return s +} + +func (this *SubGraph) Walk(v Visitor) { + if v == nil { + return + } + v = v.Visit(this) + this.ID.Walk(v) + this.StmtList.Walk(v) +} + +type EdgeAttrs AttrList + +func NewEdgeAttrs(a Attrib) (EdgeAttrs, error) { + return EdgeAttrs(a.(AttrList)), nil +} + +func (this EdgeAttrs) String() string { + s := AttrList(this).String() + if len(s) == 0 { + return "" + } + return `edge ` + s +} + +func (this EdgeAttrs) Walk(v Visitor) { + if v == nil { + return + } + v = v.Visit(this) + for i := range this { + this[i].Walk(v) + } +} + +type NodeAttrs AttrList + +func NewNodeAttrs(a Attrib) (NodeAttrs, error) { + return NodeAttrs(a.(AttrList)), nil +} + +func (this NodeAttrs) String() string { + s := AttrList(this).String() + if len(s) == 0 { + return "" + } + return `node ` + s +} + +func (this NodeAttrs) Walk(v Visitor) { + if v == nil { + return + } + v = v.Visit(this) + for i := range this { + this[i].Walk(v) + } +} + +type GraphAttrs AttrList + +func NewGraphAttrs(a Attrib) (GraphAttrs, error) { + return GraphAttrs(a.(AttrList)), nil +} + +func (this GraphAttrs) String() string { + s := AttrList(this).String() + if len(s) == 0 { + return "" + } + return `graph ` + s +} + +func (this GraphAttrs) Walk(v Visitor) { + if v == nil { + return + } + v = v.Visit(this) + for i := range this { + this[i].Walk(v) + } +} + +type AttrList []AList + +func NewAttrList(a Attrib) (AttrList, error) { + as := make(AttrList, 0) + if a != nil { + as = append(as, a.(AList)) + } + return as, nil +} + +func AppendAttrList(as, a Attrib) (AttrList, error) { + this := as.(AttrList) + if a == nil { + return this, nil + } + this = append(this, a.(AList)) + return this, nil +} + +func (this AttrList) String() string { + s := "" + for _, alist := range this { + ss := alist.String() + if len(ss) > 0 { + s += "[ " + ss + " ] " + } + } + if len(s) == 0 { + return "" + } + return s +} + +func (this AttrList) Walk(v Visitor) { + if v == nil { + return + } + v = v.Visit(this) + for i := range this { + this[i].Walk(v) + } +} + +func PutMap(attrmap map[string]string) AttrList { + attrlist := 
make(AttrList, 1) + attrlist[0] = make(AList, 0) + keys := make([]string, 0, len(attrmap)) + for key := range attrmap { + keys = append(keys, key) + } + sort.Strings(keys) + for _, name := range keys { + value := attrmap[name] + attrlist[0] = append(attrlist[0], &Attr{ID(name), ID(value)}) + } + return attrlist +} + +func (this AttrList) GetMap() map[string]string { + attrs := make(map[string]string) + for _, alist := range this { + for _, attr := range alist { + attrs[attr.Field.String()] = attr.Value.String() + } + } + return attrs +} + +type AList []*Attr + +func NewAList(a Attrib) (AList, error) { + as := make(AList, 1) + as[0] = a.(*Attr) + return as, nil +} + +func AppendAList(as, a Attrib) (AList, error) { + this := as.(AList) + attr := a.(*Attr) + this = append(this, attr) + return this, nil +} + +func (this AList) String() string { + if len(this) == 0 { + return "" + } + str := this[0].String() + for i := 1; i < len(this); i++ { + str += `, ` + this[i].String() + } + return str +} + +func (this AList) Walk(v Visitor) { + v = v.Visit(this) + for i := range this { + this[i].Walk(v) + } +} + +type Attr struct { + Field ID + Value ID +} + +func NewAttr(f, v Attrib) (*Attr, error) { + a := &Attr{Field: f.(ID)} + a.Value = ID("true") + if v != nil { + ok := false + a.Value, ok = v.(ID) + if !ok { + return nil, errors.New(fmt.Sprintf("value = %v", v)) + } + } + return a, nil +} + +func (this *Attr) String() string { + return this.Field.String() + `=` + this.Value.String() +} + +func (this *Attr) Walk(v Visitor) { + if v == nil { + return + } + v = v.Visit(this) + this.Field.Walk(v) + this.Value.Walk(v) +} + +type Location interface { + Elem + Walkable + isLocation() + GetID() ID + GetPort() Port + IsNode() bool +} + +func (this *NodeID) isLocation() {} +func (this *NodeID) IsNode() bool { return true } +func (this *SubGraph) isLocation() {} +func (this *SubGraph) IsNode() bool { return false } + +type EdgeStmt struct { + Source Location + EdgeRHS EdgeRHS + Attrs AttrList +} + +func NewEdgeStmt(id, e, attrs Attrib) (*EdgeStmt, error) { + var a AttrList = nil + var err error = nil + if attrs == nil { + a, err = NewAttrList(nil) + if err != nil { + return nil, err + } + } else { + a = attrs.(AttrList) + } + return &EdgeStmt{id.(Location), e.(EdgeRHS), a}, nil +} + +func (this EdgeStmt) String() string { + return strings.TrimSpace(this.Source.String() + this.EdgeRHS.String() + this.Attrs.String()) +} + +func (this EdgeStmt) Walk(v Visitor) { + if v == nil { + return + } + v = v.Visit(this) + this.Source.Walk(v) + this.EdgeRHS.Walk(v) + this.Attrs.Walk(v) +} + +type EdgeRHS []*EdgeRH + +func NewEdgeRHS(op, id Attrib) (EdgeRHS, error) { + return EdgeRHS{&EdgeRH{op.(EdgeOp), id.(Location)}}, nil +} + +func AppendEdgeRHS(e, op, id Attrib) (EdgeRHS, error) { + erhs := e.(EdgeRHS) + erhs = append(erhs, &EdgeRH{op.(EdgeOp), id.(Location)}) + return erhs, nil +} + +func (this EdgeRHS) String() string { + s := "" + for i := range this { + s += this[i].String() + } + return strings.TrimSpace(s) +} + +func (this EdgeRHS) Walk(v Visitor) { + if v == nil { + return + } + v = v.Visit(this) + for i := range this { + this[i].Walk(v) + } +} + +type EdgeRH struct { + Op EdgeOp + Destination Location +} + +func (this *EdgeRH) String() string { + return strings.TrimSpace(this.Op.String() + this.Destination.String()) +} + +func (this *EdgeRH) Walk(v Visitor) { + if v == nil { + return + } + v = v.Visit(this) + this.Op.Walk(v) + this.Destination.Walk(v) +} + +type NodeStmt struct { + NodeID *NodeID + Attrs 
AttrList +} + +func NewNodeStmt(id, attrs Attrib) (*NodeStmt, error) { + nid := id.(*NodeID) + var a AttrList = nil + var err error = nil + if attrs == nil { + a, err = NewAttrList(nil) + if err != nil { + return nil, err + } + } else { + a = attrs.(AttrList) + } + return &NodeStmt{nid, a}, nil +} + +func (this NodeStmt) String() string { + return strings.TrimSpace(this.NodeID.String() + ` ` + this.Attrs.String()) +} + +func (this NodeStmt) Walk(v Visitor) { + if v == nil { + return + } + v = v.Visit(this) + this.NodeID.Walk(v) + this.Attrs.Walk(v) +} + +type EdgeOp bool + +const ( + DIRECTED EdgeOp = true + UNDIRECTED EdgeOp = false +) + +func (this EdgeOp) String() string { + if this == DIRECTED { + return "->" + } + return "--" +} + +func (this EdgeOp) Walk(v Visitor) { + if v == nil { + return + } + v.Visit(this) +} + +type NodeID struct { + ID ID + Port Port +} + +func NewNodeID(id, port Attrib) (*NodeID, error) { + if port == nil { + return &NodeID{id.(ID), Port{"", ""}}, nil + } + return &NodeID{id.(ID), port.(Port)}, nil +} + +func MakeNodeID(id string, port string) *NodeID { + p := Port{"", ""} + if len(port) > 0 { + ps := strings.Split(port, ":") + p.ID1 = ID(ps[0]) + if len(ps) > 1 { + p.ID2 = ID(ps[1]) + } + } + return &NodeID{ID(id), p} +} + +func (this *NodeID) String() string { + return this.ID.String() + this.Port.String() +} + +func (this *NodeID) GetID() ID { + return this.ID +} + +func (this *NodeID) GetPort() Port { + return this.Port +} + +func (this *NodeID) Walk(v Visitor) { + if v == nil { + return + } + v = v.Visit(this) + this.ID.Walk(v) + this.Port.Walk(v) +} + +//TODO semantic analysis should decide which ID is an ID and which is a Compass Point +type Port struct { + ID1 ID + ID2 ID +} + +func NewPort(id1, id2 Attrib) Port { + port := Port{ID(""), ID("")} + if id1 != nil { + port.ID1 = id1.(ID) + } + if id2 != nil { + port.ID2 = id2.(ID) + } + return port +} + +func (this Port) String() string { + if len(this.ID1) == 0 { + return "" + } + s := ":" + this.ID1.String() + if len(this.ID2) > 0 { + s += ":" + this.ID2.String() + } + return s +} + +func (this Port) Walk(v Visitor) { + if v == nil { + return + } + v = v.Visit(this) + this.ID1.Walk(v) + this.ID2.Walk(v) +} + +type ID string + +func NewID(id Attrib) (ID, error) { + if id == nil { + return ID(""), nil + } + id_lit := string(id.(*token.Token).Lit) + return ID(id_lit), nil +} + +func (this ID) String() string { + return string(this) +} + +func (this ID) Walk(v Visitor) { + if v == nil { + return + } + v.Visit(this) +} diff --git a/vendor/github.com/awalterschulze/gographviz/attr.go b/vendor/github.com/awalterschulze/gographviz/attr.go new file mode 100644 index 0000000..35004fa --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/attr.go @@ -0,0 +1,559 @@ +//Copyright 2017 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http)://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. 
+ +package gographviz + +import "fmt" + +// Attr is an attribute key +type Attr string + +// NewAttr creates a new attribute key by checking whether it is a valid key +func NewAttr(key string) (Attr, error) { + a, ok := validAttrs[key] + if !ok { + return Attr(""), fmt.Errorf("%s is not a valid attribute", key) + } + return a, nil +} + +const ( + // Damping http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:Damping + Damping Attr = "Damping" + // K http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:K + K Attr = "K" + // URL http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:URL + URL Attr = "URL" + // Background http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:_background + Background Attr = "_background" + // Area http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:area + Area Attr = "area" + // ArrowHead http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:arrowhead + ArrowHead Attr = "arrowhead" + // ArrowSize http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:arrowsize + ArrowSize Attr = "arrowsize" + // ArrowTail http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:arrowtail + ArrowTail Attr = "arrowtail" + // BB http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:bb + BB Attr = "bb" + // BgColor http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:bgcolor + BgColor Attr = "bgcolor" + // Center http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:center + Center Attr = "center" + // Charset http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:charset + Charset Attr = "charset" + // ClusterRank http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:clusterrank + ClusterRank Attr = "clusterrank" + // Color http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:color + Color Attr = "color" + // ColorScheme http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:colorscheme + ColorScheme Attr = "colorscheme" + // Comment http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:comment + Comment Attr = "comment" + // Compound http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:compound + Compound Attr = "compound" + // Concentrate http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:concentrate + Concentrate Attr = "concentrate" + // Constraint http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:constraint + Constraint Attr = "constraint" + // Decorate http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:decorate + Decorate Attr = "decorate" + // DefaultDist http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:defaultdist + DefaultDist Attr = "defaultdist" + // Dim http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:dim + Dim Attr = "dim" + // Dimen http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:dimen + Dimen Attr = "dimen" + // Dir http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:dir + Dir Attr = "dir" + // DirEdgeConstraints http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:dir + DirEdgeConstraints Attr = "diredgeconstraints" + // Distortion http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:distortion + Distortion Attr = "distortion" + // DPI http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:dpi + DPI Attr = "dpi" + // EdgeURL http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d::edgeURL + EdgeURL Attr = "edgeURL" + // EdgeHREF http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d::edgehref + EdgeHREF Attr = "edgehref" + // EdgeTarget http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d::edgetarget + EdgeTarget Attr = "edgetarget" + // EdgeTooltip 
http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d::edgetooltip + EdgeTooltip Attr = "edgetooltip" + // Epsilon http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d::epsilon + Epsilon Attr = "epsilon" + // ESep http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d::epsilon + ESep Attr = "esep" + // FillColor http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:fillcolor + FillColor Attr = "fillcolor" + // FixedSize http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:fixedsize + FixedSize Attr = "fixedsize" + // FontColor http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:fontcolor + FontColor Attr = "fontcolor" + // FontName http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:fontname + FontName Attr = "fontname" + // FontNames http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:fontnames + FontNames Attr = "fontnames" + // FontPath http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:fontpath + FontPath Attr = "fontpath" + // FontSize http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:fontsize + FontSize Attr = "fontsize" + // ForceLabels http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:forcelabels + ForceLabels Attr = "forcelabels" + // GradientAngle http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:gradientangle + GradientAngle Attr = "gradientangle" + // Group http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:group + Group Attr = "group" + // HeadURL http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:headURL + HeadURL Attr = "headURL" + // HeadLP http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:head_lp + HeadLP Attr = "head_lp" + // HeadClip http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:headclip + HeadClip Attr = "headclip" + // HeadHREF http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:headhref + HeadHREF Attr = "headhref" + // HeadLabel http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:headlabel + HeadLabel Attr = "headlabel" + // HeadPort http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:headport + HeadPort Attr = "headport" + // HeadTarget http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:headtarget + HeadTarget Attr = "headtarget" + // HeadTooltip http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:headtooltip + HeadTooltip Attr = "headtooltip" + // Height http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:height + Height Attr = "height" + // HREF http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:href + HREF Attr = "href" + // ID http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:id + ID Attr = "id" + // Image http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:image + Image Attr = "image" + // ImagePath http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:imagepath + ImagePath Attr = "imagepath" + // ImageScale http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:imagescale + ImageScale Attr = "imagescale" + // InputScale http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:inputscale + InputScale Attr = "inputscale" + // Label http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:label + Label Attr = "label" + // LabelURL http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:labelURL + LabelURL Attr = "labelURL" + // LabelScheme http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:label_scheme + LabelScheme Attr = "label_scheme" + // LabelAngle http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:labelangle + LabelAngle Attr = "labelangle" + // LabelDistance http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:labeldistance + LabelDistance Attr = "labeldistance" + 
// LabelFloat http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:labelfloat + LabelFloat Attr = "labelfloat" + // LabelFontColor http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:labelfontcolor + LabelFontColor Attr = "labelfontcolor" + // LabelFontName http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:labelfontname + LabelFontName Attr = "labelfontname" + // LabelFontSize http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:labelfontsize + LabelFontSize Attr = "labelfontsize" + // LabelHREF http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:labelhref + LabelHREF Attr = "labelhref" + // LabelJust http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:labeljust + LabelJust Attr = "labeljust" + // LabelLOC http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:labelloc + LabelLOC Attr = "labelloc" + // LabelTarget http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:labeltarget + LabelTarget Attr = "labeltarget" + // LabelTooltip http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:labeltooltip + LabelTooltip Attr = "labeltooltip" + // Landscape http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:landscape + Landscape Attr = "landscape" + // Layer http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:layer + Layer Attr = "layer" + // LayerListSep http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:layerlistsep + LayerListSep Attr = "layerlistsep" + // Layers http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:layers + Layers Attr = "layers" + // LayerSelect http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:layerselect + LayerSelect Attr = "layerselect" + // LayerSep http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:layersep + LayerSep Attr = "layersep" + // Layout http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:layout + Layout Attr = "layout" + // Len http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:len + Len Attr = "len" + // Levels http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:levels + Levels Attr = "levels" + // LevelsGap http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:levelsgap + LevelsGap Attr = "levelsgap" + // LHead http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:lhead + LHead Attr = "lhead" + // LHeight http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:lheight + LHeight Attr = "lheight" + // LP http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:lp + LP Attr = "lp" + // LTail http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:ltail + LTail Attr = "ltail" + // LWidth http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:lwidth + LWidth Attr = "lwidth" + // Margin http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:margin + Margin Attr = "margin" + // MaxIter http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:maxiter + MaxIter Attr = "maxiter" + // MCLimit http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:mclimit + MCLimit Attr = "mclimit" + // MinDist http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:mindist + MinDist Attr = "mindist" + // MinLen http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:mindist + MinLen Attr = "minlen" + // Mode http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:mode + Mode Attr = "mode" + // Model http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:model + Model Attr = "model" + // Mosek http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:mosek + Mosek Attr = "mosek" + // NewRank http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:newrank + NewRank Attr = "newrank" + // NodeSep 
http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:nodesep + NodeSep Attr = "nodesep" + // NoJustify http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:nojustify + NoJustify Attr = "nojustify" + // Normalize http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:normalize + Normalize Attr = "normalize" + // NoTranslate http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:notranslate + NoTranslate Attr = "notranslate" + // NSLimit http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:nslimit + NSLimit Attr = "nslimit" + // NSLimit1 http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:nslimit1 + NSLimit1 Attr = "nslimit1" + // Ordering http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:nslimit1 + Ordering Attr = "ordering" + // Orientation http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:orientation + Orientation Attr = "orientation" + // OutputOrder http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:outputorder + OutputOrder Attr = "outputorder" + // Overlap http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:overlap + Overlap Attr = "overlap" + // OverlapScaling http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:overlap_scaling + OverlapScaling Attr = "overlap_scaling" + // OverlapShrink http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:overlap_shrink + OverlapShrink Attr = "overlap_shrink" + // Pack http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:pack + Pack Attr = "pack" + // PackMode http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:packmode + PackMode Attr = "packmode" + // Pad http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:pad + Pad Attr = "pad" + // Page http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:page + Page Attr = "page" + // PageDir http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:pagedir + PageDir Attr = "pagedir" + // PenColor http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:pencolor + PenColor Attr = "pencolor" + // PenWidth http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:penwidth + PenWidth Attr = "penwidth" + // Peripheries http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:peripheries + Peripheries Attr = "peripheries" + // Pin http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:peripheries + Pin Attr = "pin" + // Pos http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:pos + Pos Attr = "pos" + // QuadTree http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:quadtree + QuadTree Attr = "quadtree" + // Quantum http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:quantum + Quantum Attr = "quantum" + // Rank http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:rank + Rank Attr = "rank" + // RankDir http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:rankdir + RankDir Attr = "rankdir" + // RankSep http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:ranksep + RankSep Attr = "ranksep" + // Ratio http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:ratio + Ratio Attr = "ratio" + // Rects http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:rects + Rects Attr = "rects" + // Regular http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:regular + Regular Attr = "regular" + // ReMinCross http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:remincross + ReMinCross Attr = "remincross" + // RepulsiveForce http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:repulsiveforce + RepulsiveForce Attr = "repulsiveforce" + // Resolution http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:resolution + Resolution Attr = "resolution" + // Root 
http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:root + Root Attr = "root" + // Rotate http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:rotate + Rotate Attr = "rotate" + // Rotation http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:rotation + Rotation Attr = "rotation" + // SameHead http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:samehead + SameHead Attr = "samehead" + // SameTail http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:sametail + SameTail Attr = "sametail" + // SamplePoints http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:samplepoints + SamplePoints Attr = "samplepoints" + // Scale http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:scale + Scale Attr = "scale" + // SearchSize http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:searchsize + SearchSize Attr = "searchsize" + // Sep http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:sep + Sep Attr = "sep" + // Shape http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:shape + Shape Attr = "shape" + // ShapeFile http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:shapefile + ShapeFile Attr = "shapefile" + // ShowBoxes http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:showboxes + ShowBoxes Attr = "showboxes" + // Sides http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:sides + Sides Attr = "sides" + // Size http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:size + Size Attr = "size" + // Skew http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:skew + Skew Attr = "skew" + // Smoothing http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:smoothing + Smoothing Attr = "smoothing" + // SortV http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:sortv + SortV Attr = "sortv" + // Splines http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:splines + Splines Attr = "splines" + // Start http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:start + Start Attr = "start" + // Style http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:style + Style Attr = "style" + // StyleSheet http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:stylesheet + StyleSheet Attr = "stylesheet" + // TailURL http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:tailURL + TailURL Attr = "tailURL" + // TailLP http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:tail_lp + TailLP Attr = "tail_lp" + // TailClip http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:tailclip + TailClip Attr = "tailclip" + // TailHREF http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:tailhref + TailHREF Attr = "tailhref" + // TailLabel http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:taillabel + TailLabel Attr = "taillabel" + // TailPort http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:tailport + TailPort Attr = "tailport" + // TailTarget http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:tailtarget + TailTarget Attr = "tailtarget" + // TailTooltip http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:tailtooltip + TailTooltip Attr = "tailtooltip" + // Target http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:target + Target Attr = "target" + // Tooltip http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:tooltip + Tooltip Attr = "tooltip" + // TrueColor http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:tooltip + TrueColor Attr = "truecolor" + // Vertices http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:vertices + Vertices Attr = "vertices" + // ViewPort http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:viewport + ViewPort Attr = "viewport" + // VoroMargin 
http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:voro_margin + VoroMargin Attr = "voro_margin" + // Weight http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:weight + Weight Attr = "weight" + // Width http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:width + Width Attr = "width" + // XDotVersion http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:xdotversion + XDotVersion Attr = "xdotversion" + // XLabel http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:xlabel + XLabel Attr = "xlabel" + // XLP http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:xlp + XLP Attr = "xlp" + // Z http://graphviz.gitlab.io/_pages/doc/info/attrs.html#d:z + Z Attr = "z" + + // MinCross is not in the documentation, but found in the Ped_Lion_Share (lion_share.gv.txt) example + MinCross Attr = "mincross" + // SSize is not in the documentation, but found in the siblings.gv.txt example + SSize Attr = "ssize" + // Outline is not in the documentation, but found in the siblings.gv.txt example + Outline Attr = "outline" + // F is not in the documentation, but found in the transparency.gv.txt example + F Attr = "f" +) + +var validAttrs = map[string]Attr{ + string(Damping): Damping, + string(K): K, + string(URL): URL, + string(Background): Background, + string(Area): Area, + string(ArrowHead): ArrowHead, + string(ArrowSize): ArrowSize, + string(ArrowTail): ArrowTail, + string(BB): BB, + string(BgColor): BgColor, + string(Center): Center, + string(Charset): Charset, + string(ClusterRank): ClusterRank, + string(Color): Color, + string(ColorScheme): ColorScheme, + string(Comment): Comment, + string(Compound): Compound, + string(Concentrate): Concentrate, + string(Constraint): Constraint, + string(Decorate): Decorate, + string(DefaultDist): DefaultDist, + string(Dim): Dim, + string(Dimen): Dimen, + string(Dir): Dir, + string(DirEdgeConstraints): DirEdgeConstraints, + string(Distortion): Distortion, + string(DPI): DPI, + string(EdgeURL): EdgeURL, + string(EdgeHREF): EdgeHREF, + string(EdgeTarget): EdgeTarget, + string(EdgeTooltip): EdgeTooltip, + string(Epsilon): Epsilon, + string(ESep): ESep, + string(FillColor): FillColor, + string(FixedSize): FixedSize, + string(FontColor): FontColor, + string(FontName): FontName, + string(FontNames): FontNames, + string(FontPath): FontPath, + string(FontSize): FontSize, + string(ForceLabels): ForceLabels, + string(GradientAngle): GradientAngle, + string(Group): Group, + string(HeadURL): HeadURL, + string(HeadLP): HeadLP, + string(HeadClip): HeadClip, + string(HeadHREF): HeadHREF, + string(HeadLabel): HeadLabel, + string(HeadPort): HeadPort, + string(HeadTarget): HeadTarget, + string(HeadTooltip): HeadTooltip, + string(Height): Height, + string(HREF): HREF, + string(ID): ID, + string(Image): Image, + string(ImagePath): ImagePath, + string(ImageScale): ImageScale, + string(InputScale): InputScale, + string(Label): Label, + string(LabelURL): LabelURL, + string(LabelScheme): LabelScheme, + string(LabelAngle): LabelAngle, + string(LabelDistance): LabelDistance, + string(LabelFloat): LabelFloat, + string(LabelFontColor): LabelFontColor, + string(LabelFontName): LabelFontName, + string(LabelFontSize): LabelFontSize, + string(LabelHREF): LabelHREF, + string(LabelJust): LabelJust, + string(LabelLOC): LabelLOC, + string(LabelTarget): LabelTarget, + string(LabelTooltip): LabelTooltip, + string(Landscape): Landscape, + string(Layer): Layer, + string(LayerListSep): LayerListSep, + string(Layers): Layers, + string(LayerSelect): LayerSelect, + string(LayerSep): LayerSep, + 
string(Layout): Layout, + string(Len): Len, + string(Levels): Levels, + string(LevelsGap): LevelsGap, + string(LHead): LHead, + string(LHeight): LHeight, + string(LP): LP, + string(LTail): LTail, + string(LWidth): LWidth, + string(Margin): Margin, + string(MaxIter): MaxIter, + string(MCLimit): MCLimit, + string(MinDist): MinDist, + string(MinLen): MinLen, + string(Mode): Mode, + string(Model): Model, + string(Mosek): Mosek, + string(NewRank): NewRank, + string(NodeSep): NodeSep, + string(NoJustify): NoJustify, + string(Normalize): Normalize, + string(NoTranslate): NoTranslate, + string(NSLimit): NSLimit, + string(NSLimit1): NSLimit1, + string(Ordering): Ordering, + string(Orientation): Orientation, + string(OutputOrder): OutputOrder, + string(Overlap): Overlap, + string(OverlapScaling): OverlapScaling, + string(OverlapShrink): OverlapShrink, + string(Pack): Pack, + string(PackMode): PackMode, + string(Pad): Pad, + string(Page): Page, + string(PageDir): PageDir, + string(PenColor): PenColor, + string(PenWidth): PenWidth, + string(Peripheries): Peripheries, + string(Pin): Pin, + string(Pos): Pos, + string(QuadTree): QuadTree, + string(Quantum): Quantum, + string(Rank): Rank, + string(RankDir): RankDir, + string(RankSep): RankSep, + string(Ratio): Ratio, + string(Rects): Rects, + string(Regular): Regular, + string(ReMinCross): ReMinCross, + string(RepulsiveForce): RepulsiveForce, + string(Resolution): Resolution, + string(Root): Root, + string(Rotate): Rotate, + string(Rotation): Rotation, + string(SameHead): SameHead, + string(SameTail): SameTail, + string(SamplePoints): SamplePoints, + string(Scale): Scale, + string(SearchSize): SearchSize, + string(Sep): Sep, + string(Shape): Shape, + string(ShapeFile): ShapeFile, + string(ShowBoxes): ShowBoxes, + string(Sides): Sides, + string(Size): Size, + string(Skew): Skew, + string(Smoothing): Smoothing, + string(SortV): SortV, + string(Splines): Splines, + string(Start): Start, + string(Style): Style, + string(StyleSheet): StyleSheet, + string(TailURL): TailURL, + string(TailLP): TailLP, + string(TailClip): TailClip, + string(TailHREF): TailHREF, + string(TailLabel): TailLabel, + string(TailPort): TailPort, + string(TailTarget): TailTarget, + string(TailTooltip): TailTooltip, + string(Target): Target, + string(Tooltip): Tooltip, + string(TrueColor): TrueColor, + string(Vertices): Vertices, + string(ViewPort): ViewPort, + string(VoroMargin): VoroMargin, + string(Weight): Weight, + string(Width): Width, + string(XDotVersion): XDotVersion, + string(XLabel): XLabel, + string(XLP): XLP, + string(Z): Z, + + string(MinCross): MinCross, + string(SSize): SSize, + string(Outline): Outline, + string(F): F, +} diff --git a/vendor/github.com/awalterschulze/gographviz/attrs.go b/vendor/github.com/awalterschulze/gographviz/attrs.go new file mode 100644 index 0000000..a00eeb7 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/attrs.go @@ -0,0 +1,99 @@ +//Copyright 2013 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. 
+ +package gographviz + +import ( + "sort" +) + +// Attrs represents attributes for an Edge, Node or Graph. +type Attrs map[Attr]string + +// NewAttrs creates an empty Attributes type. +func NewAttrs(m map[string]string) (Attrs, error) { + as := make(Attrs) + for k, v := range m { + if err := as.Add(k, v); err != nil { + return nil, err + } + } + return as, nil +} + +// Add adds an attribute name and value. +func (attrs Attrs) Add(field string, value string) error { + a, err := NewAttr(field) + if err != nil { + return err + } + attrs.add(a, value) + return nil +} + +func (attrs Attrs) add(field Attr, value string) { + attrs[field] = value +} + +// Extend adds the attributes into attrs Attrs type overwriting duplicates. +func (attrs Attrs) Extend(more Attrs) { + for key, value := range more { + attrs.add(key, value) + } +} + +// Ammend only adds the missing attributes to attrs Attrs type. +func (attrs Attrs) Ammend(more Attrs) { + for key, value := range more { + if _, ok := attrs[key]; !ok { + attrs.add(key, value) + } + } +} + +func (attrs Attrs) toMap() map[string]string { + m := make(map[string]string) + for k, v := range attrs { + m[string(k)] = v + } + return m +} + +type attrList []Attr + +func (attrs attrList) Len() int { return len(attrs) } +func (attrs attrList) Less(i, j int) bool { + return attrs[i] < attrs[j] +} +func (attrs attrList) Swap(i, j int) { + attrs[i], attrs[j] = attrs[j], attrs[i] +} + +func (attrs Attrs) sortedNames() []Attr { + keys := make(attrList, 0) + for key := range attrs { + keys = append(keys, key) + } + sort.Sort(keys) + return []Attr(keys) +} + +// Copy returns a copy of the attributes map +func (attrs Attrs) Copy() Attrs { + mm := make(Attrs) + for k, v := range attrs { + mm[k] = v + } + return mm +} diff --git a/vendor/github.com/awalterschulze/gographviz/catch.go b/vendor/github.com/awalterschulze/gographviz/catch.go new file mode 100644 index 0000000..7a4ed11 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/catch.go @@ -0,0 +1,101 @@ +//Copyright 2017 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. 
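+ +// The errCatcher below wraps an error-returning Interface in the error-free +// errInterface: each wrapped call records any error it returns, and getError +// later reports all recorded errors as a single combined error.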
+ +package gographviz + +import ( + "fmt" + "strings" +) + +type errInterface interface { + SetStrict(strict bool) + SetDir(directed bool) + SetName(name string) + AddPortEdge(src, srcPort, dst, dstPort string, directed bool, attrs map[string]string) + AddEdge(src, dst string, directed bool, attrs map[string]string) + AddNode(parentGraph string, name string, attrs map[string]string) + AddAttr(parentGraph string, field, value string) + AddSubGraph(parentGraph string, name string, attrs map[string]string) + String() string + getError() error +} + +func newErrCatcher(g Interface) errInterface { + return &errCatcher{g, nil} +} + +type errCatcher struct { + Interface + errs []error +} + +func (e *errCatcher) SetStrict(strict bool) { + if err := e.Interface.SetStrict(strict); err != nil { + e.errs = append(e.errs, err) + } +} + +func (e *errCatcher) SetDir(directed bool) { + if err := e.Interface.SetDir(directed); err != nil { + e.errs = append(e.errs, err) + } +} + +func (e *errCatcher) SetName(name string) { + if err := e.Interface.SetName(name); err != nil { + e.errs = append(e.errs, err) + } +} + +func (e *errCatcher) AddPortEdge(src, srcPort, dst, dstPort string, directed bool, attrs map[string]string) { + if err := e.Interface.AddPortEdge(src, srcPort, dst, dstPort, directed, attrs); err != nil { + e.errs = append(e.errs, err) + } +} + +func (e *errCatcher) AddEdge(src, dst string, directed bool, attrs map[string]string) { + if err := e.Interface.AddEdge(src, dst, directed, attrs); err != nil { + e.errs = append(e.errs, err) + } +} + +func (e *errCatcher) AddAttr(parentGraph string, field, value string) { + if err := e.Interface.AddAttr(parentGraph, field, value); err != nil { + e.errs = append(e.errs, err) + } +} + +func (e *errCatcher) AddSubGraph(parentGraph string, name string, attrs map[string]string) { + if err := e.Interface.AddSubGraph(parentGraph, name, attrs); err != nil { + e.errs = append(e.errs, err) + } +} + +func (e *errCatcher) AddNode(parentGraph string, name string, attrs map[string]string) { + if err := e.Interface.AddNode(parentGraph, name, attrs); err != nil { + e.errs = append(e.errs, err) + } +} + +func (e *errCatcher) getError() error { + if len(e.errs) == 0 { + return nil + } + ss := make([]string, len(e.errs)) + for i, err := range e.errs { + ss[i] = err.Error() + } + return fmt.Errorf("errors: [%s]", strings.Join(ss, ",")) +} diff --git a/vendor/github.com/awalterschulze/gographviz/dot.bnf b/vendor/github.com/awalterschulze/gographviz/dot.bnf new file mode 100644 index 0000000..17fd767 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/dot.bnf @@ -0,0 +1,292 @@ +//Copyright 2013 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +//This bnf has been derived from https://graphviz.gitlab.io/_pages/doc/info/lang.html +//The rules have been copied and are shown in the comments, with their derived bnf rules below. 
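+ +// As a quick, illustrative example (not one of the derived rules), a minimal +// input accepted by this grammar is: +// +// digraph G { +// a -> b [label="x"]; +// }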
+ +// ### [ Tokens ] ############################################################## + +// The keywords node, edge, graph, digraph, subgraph, and strict are case- +// independent. + +node + : 'n' 'o' 'd' 'e' + | 'N' 'o' 'd' 'e' + | 'N' 'O' 'D' 'E' +; + +edge + : 'e' 'd' 'g' 'e' + | 'E' 'd' 'g' 'e' + | 'E' 'D' 'G' 'E' +; + +// TODO: Rename graphx to graph once gocc#20 is fixed [1]. +// +// [1]: https://github.com/goccmack/gocc/issues/20 + +graphx + : 'g' 'r' 'a' 'p' 'h' + | 'G' 'r' 'a' 'p' 'h' + | 'G' 'R' 'A' 'P' 'H' +; + +digraph + : 'd' 'i' 'g' 'r' 'a' 'p' 'h' + | 'D' 'i' 'g' 'r' 'a' 'p' 'h' + | 'd' 'i' 'G' 'r' 'a' 'p' 'h' + | 'D' 'i' 'G' 'r' 'a' 'p' 'h' + | 'D' 'I' 'G' 'R' 'A' 'P' 'H' +; + +subgraph + : 's' 'u' 'b' 'g' 'r' 'a' 'p' 'h' + | 'S' 'u' 'b' 'g' 'r' 'a' 'p' 'h' + | 's' 'u' 'b' 'G' 'r' 'a' 'p' 'h' + | 'S' 'u' 'b' 'G' 'r' 'a' 'p' 'h' + | 'S' 'U' 'B' 'G' 'R' 'A' 'P' 'H' +; + +strict + : 's' 't' 'r' 'i' 'c' 't' + | 'S' 't' 'r' 'i' 'c' 't' + | 'S' 'T' 'R' 'I' 'C' 'T' +; + +// An arbitrary ASCII character except null (0x00), double quote (0x22) and +// backslash (0x5C). +_ascii_char + // skip null (0x00) + : '\x01' - '\x21' + // skip double quote (0x22) + | '\x23' - '\x5B' + // skip backslash (0x5C) + | '\x5D' - '\x7F' +; + +_ascii_letter + : 'a' - 'z' + | 'A' - 'Z' +; + +_ascii_digit : '0' - '9' ; + +_unicode_char + : _ascii_char + | _unicode_byte +; + +_unicode_byte + : '\u0080' - '\uFFFC' + // skip invalid code point (\uFFFD) + | '\uFFFE' - '\U0010FFFF' +; + +_letter : _ascii_letter | _unicode_byte | '_' ; +_decimal_digit : _ascii_digit ; +_decimals : _decimal_digit { _decimal_digit } ; + +// An ID is one of the following: +// +// 1) Any string of alphabetic ([a-zA-Z\200-\377]) characters, underscores +// ('_') or digits ([0-9]), not beginning with a digit; +// +// 2) a numeral [-]?(.[0-9]+ | [0-9]+(.[0-9]*)? ); +// +// 3) any double-quoted string ("...") possibly containing escaped quotes +// (\"); +// +// 4) an HTML string (<...>). + +id + : _letter { _letter | _decimal_digit } + | _int_lit + | _string_lit + | _html_lit +; + +_int_lit + : [ '-' ] '.' _decimals + | [ '-' ] _decimals [ '.' { _decimal_digit } ] +; + +// In quoted strings in DOT, the only escaped character is double-quote ("). +// That is, in quoted strings, the dyad \" is converted to "; all other +// characters are left unchanged. In particular, \\ remains \\. + +_escaped_char : '\\' ( _unicode_char | '"' | '\\' ) ; +_char : _unicode_char | _escaped_char ; +_string_lit : '"' { _char } '"' ; + +// An arbitrary HTML character except null (0x00), left angle bracket (0x3C) and +// right angle bracket (0x3E). +_html_char + // skip null (0x00) + : '\x01' - '\x3B' + // skip left angle bracket (0x3C) + | '\x3D' + // skip right angle bracket (0x3E) + | '\x3F' - '\xFF' +; + +_html_chars : { _html_char } ; +_html_tag : '<' _html_chars '>' ; +_html_lit : '<' { _html_chars | _html_tag } '>' ; + +// The language supports C++-style comments: /* */ and //. In addition, a line +// beginning with a '#' character is considered a line output from a C +// preprocessor (e.g., # 34 to indicate line 34 ) and discarded. + +_line_comment + : '/' '/' { . } '\n' + | '#' { . } '\n' +; + +_block_comment : '/' '*' { . 
| '*' } '*' '/' ; +!comment : _line_comment | _block_comment ; + +!whitespace : ' ' | '\t' | '\r' | '\n' ; + +// ### [ Syntax ] ############################################################## + +<< import "github.com/awalterschulze/gographviz/ast" >> + +//graph : [ strict ] (graph | digraph) [ ID ] '{' stmt_list '}' +DotGraph + : graphx "{" "}" << ast.NewGraph(ast.GRAPH, ast.FALSE, nil, nil) >> + | strict graphx "{" "}" << ast.NewGraph(ast.GRAPH, ast.TRUE, nil, nil) >> + | graphx Id "{" "}" << ast.NewGraph(ast.GRAPH, ast.FALSE, $1, nil) >> + | strict graphx Id "{" "}" << ast.NewGraph(ast.GRAPH, ast.TRUE, $2, nil) >> + | graphx "{" StmtList "}" << ast.NewGraph(ast.GRAPH, ast.FALSE, nil, $2) >> + | graphx Id "{" StmtList "}" << ast.NewGraph(ast.GRAPH, ast.FALSE, $1, $3) >> + | strict graphx "{" StmtList "}" << ast.NewGraph(ast.GRAPH, ast.TRUE, nil, $3) >> + | strict graphx Id "{" StmtList "}" << ast.NewGraph(ast.GRAPH, ast.TRUE, $2, $4) >> + | digraph "{" "}" << ast.NewGraph(ast.DIGRAPH, ast.FALSE, nil, nil) >> + | strict digraph "{" "}" << ast.NewGraph(ast.DIGRAPH, ast.TRUE, nil, nil) >> + | digraph Id "{" "}" << ast.NewGraph(ast.DIGRAPH, ast.FALSE, $1, nil) >> + | strict digraph Id "{" "}" << ast.NewGraph(ast.DIGRAPH, ast.TRUE, $2, nil) >> + | digraph "{" StmtList "}" << ast.NewGraph(ast.DIGRAPH, ast.FALSE, nil, $2) >> + | digraph Id "{" StmtList "}" << ast.NewGraph(ast.DIGRAPH, ast.FALSE, $1, $3) >> + | strict digraph "{" StmtList "}" << ast.NewGraph(ast.DIGRAPH, ast.TRUE, nil, $3) >> + | strict digraph Id "{" StmtList "}" << ast.NewGraph(ast.DIGRAPH, ast.TRUE, $2, $4) >> + ; + +//stmt_list : [ stmt [ ';' ] [ stmt_list ] ] +StmtList + : Stmt1 << ast.NewStmtList($0) >> + | StmtList Stmt1 << ast.AppendStmtList($0, $1) >> + ; + +Stmt1 + : Stmt << $0, nil >> + | Stmt ";" << $0, nil >> + ; + +//stmt : node_stmt | edge_stmt | attr_stmt | (ID '=' ID) | subgraph +Stmt + : Id "=" Id << ast.NewAttr($0, $2) >> + | NodeStmt << $0, nil >> + | EdgeStmt << $0, nil >> + | AttrStmt << $0, nil >> + | SubGraphStmt << $0, nil >> + ; + +//attr_stmt : (graph | node | edge) attr_list +AttrStmt + : graphx AttrList << ast.NewGraphAttrs($1) >> + | node AttrList << ast.NewNodeAttrs($1) >> + | edge AttrList << ast.NewEdgeAttrs($1) >> + ; + +//attr_list : '[' [ a_list ] ']' [ attr_list ] +AttrList + : "[" "]" << ast.NewAttrList(nil) >> + | "[" AList "]" << ast.NewAttrList($1) >> + | AttrList "[" "]" << ast.AppendAttrList($0, nil) >> + | AttrList "[" AList "]" << ast.AppendAttrList($0, $2) >> + ; + +//a_list : ID [ '=' ID ] [ ',' ] [ a_list ] +AList + : Attr << ast.NewAList($0) >> + | AList Attr << ast.AppendAList($0, $1) >> + | AList "," Attr << ast.AppendAList($0, $2) >> + ; + +//An a_list clause of the form ID is equivalent to ID=true. 
+Attr + : Id << ast.NewAttr($0, nil) >> + | Id "=" Id << ast.NewAttr($0, $2) >> + ; + +//edge_stmt : (node_id | subgraph) edgeRHS [ attr_list ] +EdgeStmt + : NodeId EdgeRHS << ast.NewEdgeStmt($0, $1, nil) >> + | NodeId EdgeRHS AttrList << ast.NewEdgeStmt($0, $1, $2) >> + | SubGraphStmt EdgeRHS << ast.NewEdgeStmt($0, $1, nil) >> + | SubGraphStmt EdgeRHS AttrList << ast.NewEdgeStmt($0, $1, $2) >> + ; + +//edgeRHS : edgeop (node_id | subgraph) [ edgeRHS ] +EdgeRHS + : EdgeOp NodeId << ast.NewEdgeRHS($0, $1) >> + | EdgeOp SubGraphStmt << ast.NewEdgeRHS($0, $1) >> + | EdgeRHS EdgeOp NodeId << ast.AppendEdgeRHS($0, $1, $2) >> + | EdgeRHS EdgeOp SubGraphStmt << ast.AppendEdgeRHS($0, $1, $2) >> + ; + +//node_stmt : node_id [ attr_list ] +NodeStmt + : NodeId << ast.NewNodeStmt($0, nil) >> + | NodeId AttrList << ast.NewNodeStmt($0, $1) >> + ; + +//node_id : ID [ port ] +NodeId + : Id << ast.NewNodeID($0, nil) >> + | Id Port << ast.NewNodeID($0, $1) >> + ; + +//compass_pt : (n | ne | e | se | s | sw | w | nw | c | _) +//Note also that the allowed compass point values are not keywords, +//so these strings can be used elsewhere as ordinary identifiers and, +//conversely, the parser will actually accept any identifier. +//port : ':' ID [ ':' compass_pt ] +// | ':' compass_pt +Port + : ":" Id << ast.NewPort($1, nil), nil >> + | ":" Id ":" Id << ast.NewPort($1, $3), nil >> + ; + +//TODO: Semicolons aid readability but are not required except in the rare case that a named subgraph with no body immediately preceeds an anonymous subgraph, +//since the precedence rules cause this sequence to be parsed as a subgraph with a heading and a body. Also, any amount of whitespace may be inserted between terminals. + +//subgraph : [ subgraph [ ID ] ] '{' stmt_list '}' +SubGraphStmt + : "{" StmtList "}" << ast.NewSubGraph(nil, $1) >> + | subgraph "{" StmtList "}" << ast.NewSubGraph(nil, $2) >> + | subgraph Id "{" StmtList "}" << ast.NewSubGraph($1, $3) >> + | subgraph "{" "}" << ast.NewSubGraph(nil, nil) >> + | subgraph Id "{" "}" << ast.NewSubGraph($1, nil) >> + ; + +//An edgeop is -> in directed graphs and -- in undirected graphs. +EdgeOp + : "->" << ast.DIRECTED, nil >> + | "--" << ast.UNDIRECTED, nil >> + ; + +Id + : id << ast.NewID($0) >> + ; diff --git a/vendor/github.com/awalterschulze/gographviz/edges.go b/vendor/github.com/awalterschulze/gographviz/edges.go new file mode 100644 index 0000000..bde9269 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/edges.go @@ -0,0 +1,119 @@ +//Copyright 2013 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +package gographviz + +import ( + "sort" +) + +// Edge represents an Edge. +type Edge struct { + Src string + SrcPort string + Dst string + DstPort string + Dir bool + Attrs Attrs +} + +// Edges represents a set of Edges. +type Edges struct { + SrcToDsts map[string]map[string][]*Edge + DstToSrcs map[string]map[string][]*Edge + Edges []*Edge +} + +// NewEdges creates a blank set of Edges. 
+func NewEdges() *Edges { + return &Edges{make(map[string]map[string][]*Edge), make(map[string]map[string][]*Edge), make([]*Edge, 0)} +} + +// Add adds an Edge to the set of Edges. +func (edges *Edges) Add(edge *Edge) { + if _, ok := edges.SrcToDsts[edge.Src]; !ok { + edges.SrcToDsts[edge.Src] = make(map[string][]*Edge) + } + if _, ok := edges.SrcToDsts[edge.Src][edge.Dst]; !ok { + edges.SrcToDsts[edge.Src][edge.Dst] = make([]*Edge, 0) + } + edges.SrcToDsts[edge.Src][edge.Dst] = append(edges.SrcToDsts[edge.Src][edge.Dst], edge) + + if _, ok := edges.DstToSrcs[edge.Dst]; !ok { + edges.DstToSrcs[edge.Dst] = make(map[string][]*Edge) + } + if _, ok := edges.DstToSrcs[edge.Dst][edge.Src]; !ok { + edges.DstToSrcs[edge.Dst][edge.Src] = make([]*Edge, 0) + } + edges.DstToSrcs[edge.Dst][edge.Src] = append(edges.DstToSrcs[edge.Dst][edge.Src], edge) + + edges.Edges = append(edges.Edges, edge) +} + +// Sorted returns a sorted list of Edges. +func (edges Edges) Sorted() []*Edge { + es := make(edgeSorter, len(edges.Edges)) + copy(es, edges.Edges) + sort.Sort(es) + return es +} + +type edgeSorter []*Edge + +func (es edgeSorter) Len() int { return len(es) } +func (es edgeSorter) Swap(i, j int) { es[i], es[j] = es[j], es[i] } +func (es edgeSorter) Less(i, j int) bool { + if es[i].Src < es[j].Src { + return true + } else if es[i].Src > es[j].Src { + return false + } + + if es[i].Dst < es[j].Dst { + return true + } else if es[i].Dst > es[j].Dst { + return false + } + + if es[i].SrcPort < es[j].SrcPort { + return true + } else if es[i].SrcPort > es[j].SrcPort { + return false + } + + if es[i].DstPort < es[j].DstPort { + return true + } else if es[i].DstPort > es[j].DstPort { + return false + } + + if es[i].Dir != es[j].Dir { + return es[i].Dir + } + + attrs := es[i].Attrs.Copy() + for k, v := range es[j].Attrs { + attrs[k] = v + } + + for _, k := range attrs.sortedNames() { + if es[i].Attrs[k] < es[j].Attrs[k] { + return true + } else if es[i].Attrs[k] > es[j].Attrs[k] { + return false + } + } + + return false +} diff --git a/vendor/github.com/awalterschulze/gographviz/escape.go b/vendor/github.com/awalterschulze/gographviz/escape.go new file mode 100644 index 0000000..91e68d9 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/escape.go @@ -0,0 +1,195 @@ +//Copyright 2013 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +package gographviz + +import ( + "fmt" + "strings" + "text/template" + "unicode" +) + +// Escape is just a Graph that escapes some strings when required. 
+type Escape struct { + *Graph +} + +// NewEscape returns a graph which will try to escape some strings when required +func NewEscape() *Escape { + return &Escape{NewGraph()} +} + +func isHTML(s string) bool { + if len(s) == 0 { + return false + } + ss := strings.TrimSpace(s) + if ss[0] != '<' { + return false + } + count := 0 + for _, c := range ss { + if c == '<' { + count++ + } + if c == '>' { + count-- + } + } + if count == 0 { + return true + } + return false +} + +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || + ch >= 0x80 && unicode.IsLetter(ch) && ch != 'ε' +} + +func isID(s string) bool { + i := 0 + pos := false + for _, c := range s { + if i == 0 { + if !isLetter(c) { + return false + } + pos = true + } + if unicode.IsSpace(c) { + return false + } + if c == '-' { + return false + } + if c == '/' { + return false + } + i++ + } + return pos +} + +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +func isNumber(s string) bool { + state := 0 + for _, c := range s { + if state == 0 { + if isDigit(c) || c == '.' { + state = 2 + } else if c == '-' { + state = 1 + } else { + return false + } + } else if state == 1 { + if isDigit(c) || c == '.' { + state = 2 + } + } else if c != '.' && !isDigit(c) { + return false + } + } + return (state == 2) +} + +func isStringLit(s string) bool { + if !strings.HasPrefix(s, `"`) || !strings.HasSuffix(s, `"`) { + return false + } + var prev rune + for _, r := range s[1 : len(s)-1] { + if r == '"' && prev != '\\' { + return false + } + prev = r + } + return true +} + +func esc(s string) string { + if len(s) == 0 { + return s + } + if isHTML(s) { + return s + } + ss := strings.TrimSpace(s) + if ss[0] == '<' { + return fmt.Sprintf("\"%s\"", strings.Replace(s, "\"", "\\\"", -1)) + } + if isID(s) { + return s + } + if isNumber(s) { + return s + } + if isStringLit(s) { + return s + } + return fmt.Sprintf("\"%s\"", template.HTMLEscapeString(s)) +} + +func escAttrs(attrs map[string]string) map[string]string { + newAttrs := make(map[string]string) + for k, v := range attrs { + newAttrs[esc(k)] = esc(v) + } + return newAttrs +} + +// SetName sets the graph name and escapes it, if needed. +func (escape *Escape) SetName(name string) error { + return escape.Graph.SetName(esc(name)) +} + +// AddPortEdge adds an edge with ports and escapes the src, dst and attrs, if needed. +func (escape *Escape) AddPortEdge(src, srcPort, dst, dstPort string, directed bool, attrs map[string]string) error { + return escape.Graph.AddPortEdge(esc(src), srcPort, esc(dst), dstPort, directed, escAttrs(attrs)) +} + +// AddEdge adds an edge and escapes the src, dst and attrs, if needed. +func (escape *Escape) AddEdge(src, dst string, directed bool, attrs map[string]string) error { + return escape.AddPortEdge(src, "", dst, "", directed, attrs) +} + +// AddNode adds a node and escapes the parentGraph, name and attrs, if needed. +func (escape *Escape) AddNode(parentGraph string, name string, attrs map[string]string) error { + return escape.Graph.AddNode(esc(parentGraph), esc(name), escAttrs(attrs)) +} + +// AddAttr adds an attribute and escapes the parentGraph, field and value, if needed. +func (escape *Escape) AddAttr(parentGraph string, field, value string) error { + return escape.Graph.AddAttr(esc(parentGraph), esc(field), esc(value)) +} + +// AddSubGraph adds a subgraph and escapes the parentGraph, name and attrs, if needed. 
+func (escape *Escape) AddSubGraph(parentGraph string, name string, attrs map[string]string) error { + return escape.Graph.AddSubGraph(esc(parentGraph), esc(name), escAttrs(attrs)) +} + +// IsNode returns whether the name (escaped if needed) is a node in the graph. +func (escape *Escape) IsNode(name string) bool { + return escape.Graph.IsNode(esc(name)) +} + +// IsSubGraph returns whether the name (escaped if needed) is a subgraph in the graph. +func (escape *Escape) IsSubGraph(name string) bool { + return escape.Graph.IsSubGraph(esc(name)) +} diff --git a/vendor/github.com/awalterschulze/gographviz/gographviz.go b/vendor/github.com/awalterschulze/gographviz/gographviz.go new file mode 100644 index 0000000..277228e --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/gographviz.go @@ -0,0 +1,58 @@ +//Copyright 2013 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +//Package gographviz provides parsing for the DOT grammar into +//an abstract syntax tree representing a graph, +//analysis of the abstract syntax tree into a more usable structure, +//and writing back of this structure into the DOT format. +package gographviz + +import ( + "github.com/awalterschulze/gographviz/ast" + "github.com/awalterschulze/gographviz/internal/parser" +) + +var _ Interface = NewGraph() + +//Interface allows you to parse the graph into your own structure. +type Interface interface { + SetStrict(strict bool) error + SetDir(directed bool) error + SetName(name string) error + AddPortEdge(src, srcPort, dst, dstPort string, directed bool, attrs map[string]string) error + AddEdge(src, dst string, directed bool, attrs map[string]string) error + AddNode(parentGraph string, name string, attrs map[string]string) error + AddAttr(parentGraph string, field, value string) error + AddSubGraph(parentGraph string, name string, attrs map[string]string) error + String() string +} + +//Parse parses the buffer into an abstract syntax tree representing the graph. +func Parse(buf []byte) (*ast.Graph, error) { + return parser.ParseBytes(buf) +} + +//ParseString parses the buffer into an abstract syntax tree representing the graph. +func ParseString(buf string) (*ast.Graph, error) { + return parser.ParseBytes([]byte(buf)) +} + +//Read parses and creates a new Graph from the data. +func Read(buf []byte) (*Graph, error) { + st, err := Parse(buf) + if err != nil { + return nil, err + } + return NewAnalysedGraph(st) +} diff --git a/vendor/github.com/awalterschulze/gographviz/graph.go b/vendor/github.com/awalterschulze/gographviz/graph.go new file mode 100644 index 0000000..92b3f1c --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/graph.go @@ -0,0 +1,197 @@ +//Copyright 2013 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +package gographviz + +import ( + "fmt" + "strings" +) + +// Graph is the analysed representation of the Graph parsed from the DOT format. +type Graph struct { + Attrs Attrs + Name string + Directed bool + Strict bool + Nodes *Nodes + Edges *Edges + SubGraphs *SubGraphs + Relations *Relations +} + +// NewGraph creates a new empty graph, ready to be populated. +func NewGraph() *Graph { + return &Graph{ + Attrs: make(Attrs), + Name: "", + Directed: false, + Strict: false, + Nodes: NewNodes(), + Edges: NewEdges(), + SubGraphs: NewSubGraphs(), + Relations: NewRelations(), + } +} + +// SetStrict sets whether a graph is strict. +// If the graph is strict then multiple edges are not allowed between the same pairs of nodes; +// see the dot man page. +func (g *Graph) SetStrict(strict bool) error { + g.Strict = strict + return nil +} + +// SetDir sets whether the graph is directed (true) or undirected (false). +func (g *Graph) SetDir(dir bool) error { + g.Directed = dir + return nil +} + +// SetName sets the graph name. +func (g *Graph) SetName(name string) error { + g.Name = name + return nil +} + +// AddPortEdge adds an edge to the graph from node src to node dst. +// srcPort and dstPort are the source and destination node ports; leave them as empty strings if they are not required. +// This does not imply the adding of missing nodes. +func (g *Graph) AddPortEdge(src, srcPort, dst, dstPort string, directed bool, attrs map[string]string) error { + as, err := NewAttrs(attrs) + if err != nil { + return err + } + g.Edges.Add(&Edge{src, srcPort, dst, dstPort, directed, as}) + return nil +} + +// AddEdge adds an edge to the graph from node src to node dst. +// This does not imply the adding of missing nodes. +// If directed is set to true then SetDir(true) must also be called or there will be a syntax error in the output. +func (g *Graph) AddEdge(src, dst string, directed bool, attrs map[string]string) error { + return g.AddPortEdge(src, "", dst, "", directed, attrs) +} + +// AddNode adds a node to a graph/subgraph. +// If no subgraph exists, use the name of the main graph. +// This does not imply the adding of a missing subgraph. +func (g *Graph) AddNode(parentGraph string, name string, attrs map[string]string) error { + as, err := NewAttrs(attrs) + if err != nil { + return err + } + g.Nodes.Add(&Node{name, as}) + g.Relations.Add(parentGraph, name) + return nil +} + +// RemoveNode removes a node from the graph. +func (g *Graph) RemoveNode(parentGraph string, name string) error { + err := g.Nodes.Remove(name) + if err != nil { + return err + } + + g.Relations.Remove(parentGraph, name) + + edges := NewEdges() + for _, e := range g.Edges.Edges { + if e.Dst == name || e.Src == name { + continue + } + + edges.Add(e) + } + + g.Edges = edges + + return nil +} + +func (g *Graph) getAttrs(graphName string) (Attrs, error) { + if g.Name == graphName { + return g.Attrs, nil + } + sub, ok := g.SubGraphs.SubGraphs[graphName] + if !ok { + return nil, fmt.Errorf("graph or subgraph %s does not exist", graphName) + } + return sub.Attrs, nil +} + +// AddAttr adds an attribute to a graph/subgraph.
+func (g *Graph) AddAttr(parentGraph string, field string, value string) error { + a, err := g.getAttrs(parentGraph) + if err != nil { + return err + } + return a.Add(field, value) +} + +// AddSubGraph adds a subgraph to a graph/subgraph. +func (g *Graph) AddSubGraph(parentGraph string, name string, attrs map[string]string) error { + g.Relations.Add(parentGraph, name) + g.SubGraphs.Add(name) + for key, value := range attrs { + if err := g.AddAttr(name, key, value); err != nil { + return err + } + } + return nil +} + +// RemoveSubGraph removes the subgraph, including its nodes. +func (g *Graph) RemoveSubGraph(parentGraph string, name string) error { + for child := range g.Relations.ParentToChildren[name] { + err := g.RemoveNode(parentGraph, child) + if err != nil { + return err + } + } + + g.Relations.Remove(parentGraph, name) + g.SubGraphs.Remove(name) + + edges := NewEdges() + for _, e := range g.Edges.Edges { + if e.Dst == name || e.DstPort == name || e.Src == name || e.SrcPort == name { + continue + } + + edges.Add(e) + } + + g.Edges = edges + + return nil +} + +// IsNode returns whether a given node name exists as a node in the graph. +func (g *Graph) IsNode(name string) bool { + _, ok := g.Nodes.Lookup[name] + return ok +} + +// IsSubGraph returns whether a given subgraph name exists as a subgraph in the graph. +func (g *Graph) IsSubGraph(name string) bool { + _, ok := g.SubGraphs.SubGraphs[name] + return ok +} + +func (g *Graph) isClusterSubGraph(name string) bool { + isSubGraph := g.IsSubGraph(name) + isCluster := strings.HasPrefix(name, "cluster") + return isSubGraph && isCluster +} diff --git a/vendor/github.com/awalterschulze/gographviz/install-godeps.sh b/vendor/github.com/awalterschulze/gographviz/install-godeps.sh new file mode 100644 index 0000000..d5878ae --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/install-godeps.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +set -xe +mkdir -p $GOPATH/src/github.com/goccmack +git clone https://github.com/goccmack/gocc $GOPATH/src/github.com/goccmack/gocc +go get golang.org/x/tools/cmd/goimports +go get github.com/kisielk/errcheck +go get -u golang.org/x/lint/golint \ No newline at end of file diff --git a/vendor/github.com/awalterschulze/gographviz/internal/errors/errors.go b/vendor/github.com/awalterschulze/gographviz/internal/errors/errors.go new file mode 100644 index 0000000..9577ddc --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/internal/errors/errors.go @@ -0,0 +1,56 @@ +// Code generated by gocc; DO NOT EDIT.
+ +package errors + +import ( + "bytes" + "fmt" + + "github.com/awalterschulze/gographviz/internal/token" +) + +type ErrorSymbol interface { +} + +type Error struct { + Err error + ErrorToken *token.Token + ErrorSymbols []ErrorSymbol + ExpectedTokens []string + StackTop int +} + +func (e *Error) String() string { + w := new(bytes.Buffer) + fmt.Fprintf(w, "Error") + if e.Err != nil { + fmt.Fprintf(w, " %s\n", e.Err) + } else { + fmt.Fprintf(w, "\n") + } + fmt.Fprintf(w, "Token: type=%d, lit=%s\n", e.ErrorToken.Type, e.ErrorToken.Lit) + fmt.Fprintf(w, "Pos: offset=%d, line=%d, column=%d\n", e.ErrorToken.Pos.Offset, e.ErrorToken.Pos.Line, e.ErrorToken.Pos.Column) + fmt.Fprintf(w, "Expected one of: ") + for _, sym := range e.ExpectedTokens { + fmt.Fprintf(w, "%s ", sym) + } + fmt.Fprintf(w, "ErrorSymbol:\n") + for _, sym := range e.ErrorSymbols { + fmt.Fprintf(w, "%v\n", sym) + } + return w.String() +} + +func (e *Error) Error() string { + w := new(bytes.Buffer) + fmt.Fprintf(w, "Error in S%d: %s, %s", e.StackTop, token.TokMap.TokenString(e.ErrorToken), e.ErrorToken.Pos.String()) + if e.Err != nil { + fmt.Fprintf(w, ": %+v", e.Err) + } else { + fmt.Fprintf(w, ", expected one of: ") + for _, expected := range e.ExpectedTokens { + fmt.Fprintf(w, "%s ", expected) + } + } + return w.String() +} diff --git a/vendor/github.com/awalterschulze/gographviz/internal/lexer/acttab.go b/vendor/github.com/awalterschulze/gographviz/internal/lexer/acttab.go new file mode 100644 index 0000000..9946298 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/internal/lexer/acttab.go @@ -0,0 +1,587 @@ +// Code generated by gocc; DO NOT EDIT. + +package lexer + +import ( + "fmt" + + "github.com/awalterschulze/gographviz/internal/token" +) + +type ActionTable [NumStates]ActionRow + +type ActionRow struct { + Accept token.Type + Ignore string +} + +func (a ActionRow) String() string { + return fmt.Sprintf("Accept=%d, Ignore=%s", a.Accept, a.Ignore) +} + +var ActTab = ActionTable{ + ActionRow{ // S0 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S1 + Accept: -1, + Ignore: "!whitespace", + }, + ActionRow{ // S2 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S3 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S4 + Accept: 13, + Ignore: "", + }, + ActionRow{ // S5 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S6 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S7 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S8 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S9 + Accept: 14, + Ignore: "", + }, + ActionRow{ // S10 + Accept: 7, + Ignore: "", + }, + ActionRow{ // S11 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S12 + Accept: 8, + Ignore: "", + }, + ActionRow{ // S13 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S14 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S15 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S16 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S17 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S18 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S19 + Accept: 11, + Ignore: "", + }, + ActionRow{ // S20 + Accept: 12, + Ignore: "", + }, + ActionRow{ // S21 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S22 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S23 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S24 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S25 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S26 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S27 + Accept: 3, + Ignore: "", + }, + ActionRow{ // S28 + Accept: 4, + Ignore: "", + }, + ActionRow{ // S29 + Accept: 
18, + Ignore: "", + }, + ActionRow{ // S30 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S31 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S32 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S33 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S34 + Accept: -1, + Ignore: "!comment", + }, + ActionRow{ // S35 + Accept: 17, + Ignore: "", + }, + ActionRow{ // S36 + Accept: 16, + Ignore: "", + }, + ActionRow{ // S37 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S38 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S39 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S40 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S41 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S42 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S43 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S44 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S45 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S46 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S47 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S48 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S49 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S50 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S51 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S52 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S53 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S54 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S55 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S56 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S57 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S58 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S59 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S60 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S61 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S62 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S63 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S64 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S65 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S66 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S67 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S68 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S69 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S70 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S71 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S72 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S73 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S74 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S75 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S76 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S77 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S78 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S79 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S80 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S81 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S82 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S83 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S84 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S85 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S86 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S87 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S88 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S89 + Accept: -1, + Ignore: "!comment", + }, + ActionRow{ // S90 + Accept: 0, + Ignore: "", + }, + ActionRow{ // S91 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S92 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S93 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S94 + Accept: 10, + Ignore: "", + }, + ActionRow{ // S95 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S96 + Accept: 
18, + Ignore: "", + }, + ActionRow{ // S97 + Accept: 9, + Ignore: "", + }, + ActionRow{ // S98 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S99 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S100 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S101 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S102 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S103 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S104 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S105 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S106 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S107 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S108 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S109 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S110 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S111 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S112 + Accept: 2, + Ignore: "", + }, + ActionRow{ // S113 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S114 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S115 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S116 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S117 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S118 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S119 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S120 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S121 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S122 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S123 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S124 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S125 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S126 + Accept: 5, + Ignore: "", + }, + ActionRow{ // S127 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S128 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S129 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S130 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S131 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S132 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S133 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S134 + Accept: 6, + Ignore: "", + }, + ActionRow{ // S135 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S136 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S137 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S138 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S139 + Accept: 18, + Ignore: "", + }, + ActionRow{ // S140 + Accept: 15, + Ignore: "", + }, +} diff --git a/vendor/github.com/awalterschulze/gographviz/internal/lexer/lexer.go b/vendor/github.com/awalterschulze/gographviz/internal/lexer/lexer.go new file mode 100644 index 0000000..c967f28 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/internal/lexer/lexer.go @@ -0,0 +1,300 @@ +// Code generated by gocc; DO NOT EDIT. 
+ +package lexer + +import ( + "io/ioutil" + "unicode/utf8" + + "github.com/awalterschulze/gographviz/internal/token" +) + +const ( + NoState = -1 + NumStates = 141 + NumSymbols = 184 +) + +type Lexer struct { + src []byte + pos int + line int + column int +} + +func NewLexer(src []byte) *Lexer { + lexer := &Lexer{ + src: src, + pos: 0, + line: 1, + column: 1, + } + return lexer +} + +func NewLexerFile(fpath string) (*Lexer, error) { + src, err := ioutil.ReadFile(fpath) + if err != nil { + return nil, err + } + return NewLexer(src), nil +} + +func (l *Lexer) Scan() (tok *token.Token) { + tok = new(token.Token) + if l.pos >= len(l.src) { + tok.Type = token.EOF + tok.Pos.Offset, tok.Pos.Line, tok.Pos.Column = l.pos, l.line, l.column + return + } + start, startLine, startColumn, end := l.pos, l.line, l.column, 0 + tok.Type = token.INVALID + state, rune1, size := 0, rune(-1), 0 + for state != -1 { + if l.pos >= len(l.src) { + rune1 = -1 + } else { + rune1, size = utf8.DecodeRune(l.src[l.pos:]) + l.pos += size + } + + nextState := -1 + if rune1 != -1 { + nextState = TransTab[state](rune1) + } + state = nextState + + if state != -1 { + + switch rune1 { + case '\n': + l.line++ + l.column = 1 + case '\r': + l.column = 1 + case '\t': + l.column += 4 + default: + l.column++ + } + + switch { + case ActTab[state].Accept != -1: + tok.Type = ActTab[state].Accept + end = l.pos + case ActTab[state].Ignore != "": + start, startLine, startColumn = l.pos, l.line, l.column + state = 0 + if start >= len(l.src) { + tok.Type = token.EOF + } + + } + } else { + if tok.Type == token.INVALID { + end = l.pos + } + } + } + if end > start { + l.pos = end + tok.Lit = l.src[start:end] + } else { + tok.Lit = []byte{} + } + tok.Pos.Offset, tok.Pos.Line, tok.Pos.Column = start, startLine, startColumn + + return +} + +func (l *Lexer) Reset() { + l.pos = 0 +} + +/* +Lexer symbols: +0: 'n' +1: 'o' +2: 'd' +3: 'e' +4: 'N' +5: 'o' +6: 'd' +7: 'e' +8: 'N' +9: 'O' +10: 'D' +11: 'E' +12: 'e' +13: 'd' +14: 'g' +15: 'e' +16: 'E' +17: 'd' +18: 'g' +19: 'e' +20: 'E' +21: 'D' +22: 'G' +23: 'E' +24: 'g' +25: 'r' +26: 'a' +27: 'p' +28: 'h' +29: 'G' +30: 'r' +31: 'a' +32: 'p' +33: 'h' +34: 'G' +35: 'R' +36: 'A' +37: 'P' +38: 'H' +39: 'd' +40: 'i' +41: 'g' +42: 'r' +43: 'a' +44: 'p' +45: 'h' +46: 'D' +47: 'i' +48: 'g' +49: 'r' +50: 'a' +51: 'p' +52: 'h' +53: 'd' +54: 'i' +55: 'G' +56: 'r' +57: 'a' +58: 'p' +59: 'h' +60: 'D' +61: 'i' +62: 'G' +63: 'r' +64: 'a' +65: 'p' +66: 'h' +67: 'D' +68: 'I' +69: 'G' +70: 'R' +71: 'A' +72: 'P' +73: 'H' +74: 's' +75: 'u' +76: 'b' +77: 'g' +78: 'r' +79: 'a' +80: 'p' +81: 'h' +82: 'S' +83: 'u' +84: 'b' +85: 'g' +86: 'r' +87: 'a' +88: 'p' +89: 'h' +90: 's' +91: 'u' +92: 'b' +93: 'G' +94: 'r' +95: 'a' +96: 'p' +97: 'h' +98: 'S' +99: 'u' +100: 'b' +101: 'G' +102: 'r' +103: 'a' +104: 'p' +105: 'h' +106: 'S' +107: 'U' +108: 'B' +109: 'G' +110: 'R' +111: 'A' +112: 'P' +113: 'H' +114: 's' +115: 't' +116: 'r' +117: 'i' +118: 'c' +119: 't' +120: 'S' +121: 't' +122: 'r' +123: 'i' +124: 'c' +125: 't' +126: 'S' +127: 'T' +128: 'R' +129: 'I' +130: 'C' +131: 'T' +132: '{' +133: '}' +134: ';' +135: '=' +136: '[' +137: ']' +138: ',' +139: ':' +140: '-' +141: '>' +142: '-' +143: '-' +144: '_' +145: '-' +146: '.' +147: '-' +148: '.' +149: '\' +150: '"' +151: '\' +152: '"' +153: '"' +154: '=' +155: '<' +156: '>' +157: '<' +158: '>' +159: '/' +160: '/' +161: '\n' +162: '#' +163: '\n' +164: '/' +165: '*' +166: '*' +167: '*' +168: '/' +169: ' ' +170: '\t' +171: '\r' +172: '\n' +173: \u0001-'!' 
+174: '#'-'[' +175: ']'-\u007f +176: 'a'-'z' +177: 'A'-'Z' +178: '0'-'9' +179: \u0080-\ufffc +180: \ufffe-\U0010ffff +181: \u0001-';' +182: '?'-\u00ff +183: . +*/ diff --git a/vendor/github.com/awalterschulze/gographviz/internal/lexer/transitiontable.go b/vendor/github.com/awalterschulze/gographviz/internal/lexer/transitiontable.go new file mode 100644 index 0000000..34782b9 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/internal/lexer/transitiontable.go @@ -0,0 +1,2731 @@ +// Code generated by gocc; DO NOT EDIT. + +package lexer + +/* +Let s be the current state +Let r be the current input rune +transitionTable[s](r) returns the next state. +*/ +type TransitionTable [NumStates]func(rune) int + +var TransTab = TransitionTable{ + // S0 + func(r rune) int { + switch { + case r == 9: // ['\t','\t'] + return 1 + case r == 10: // ['\n','\n'] + return 1 + case r == 13: // ['\r','\r'] + return 1 + case r == 32: // [' ',' '] + return 1 + case r == 34: // ['"','"'] + return 2 + case r == 35: // ['#','#'] + return 3 + case r == 44: // [',',','] + return 4 + case r == 45: // ['-','-'] + return 5 + case r == 46: // ['.','.'] + return 6 + case r == 47: // ['/','/'] + return 7 + case 48 <= r && r <= 57: // ['0','9'] + return 8 + case r == 58: // [':',':'] + return 9 + case r == 59: // [';',';'] + return 10 + case r == 60: // ['<','<'] + return 11 + case r == 61: // ['=','='] + return 12 + case 65 <= r && r <= 67: // ['A','C'] + return 13 + case r == 68: // ['D','D'] + return 14 + case r == 69: // ['E','E'] + return 15 + case r == 70: // ['F','F'] + return 13 + case r == 71: // ['G','G'] + return 16 + case 72 <= r && r <= 77: // ['H','M'] + return 13 + case r == 78: // ['N','N'] + return 17 + case 79 <= r && r <= 82: // ['O','R'] + return 13 + case r == 83: // ['S','S'] + return 18 + case 84 <= r && r <= 90: // ['T','Z'] + return 13 + case r == 91: // ['[','['] + return 19 + case r == 93: // [']',']'] + return 20 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 99: // ['a','c'] + return 13 + case r == 100: // ['d','d'] + return 22 + case r == 101: // ['e','e'] + return 23 + case r == 102: // ['f','f'] + return 13 + case r == 103: // ['g','g'] + return 24 + case 104 <= r && r <= 109: // ['h','m'] + return 13 + case r == 110: // ['n','n'] + return 25 + case 111 <= r && r <= 114: // ['o','r'] + return 13 + case r == 115: // ['s','s'] + return 26 + case 116 <= r && r <= 122: // ['t','z'] + return 13 + case r == 123: // ['{','{'] + return 27 + case r == 125: // ['}','}'] + return 28 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S1 + func(r rune) int { + switch { + } + return NoState + }, + // S2 + func(r rune) int { + switch { + case 1 <= r && r <= 33: // [\u0001,'!'] + return 30 + case r == 34: // ['"','"'] + return 31 + case 35 <= r && r <= 91: // ['#','['] + return 30 + case r == 92: // ['\','\'] + return 32 + case 93 <= r && r <= 127: // [']',\u007f] + return 30 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 33 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 33 + } + return NoState + }, + // S3 + func(r rune) int { + switch { + case r == 10: // ['\n','\n'] + return 34 + default: + return 3 + } + }, + // S4 + func(r rune) int { + switch { + } + return NoState + }, + // S5 + func(r rune) int { + switch { + case r == 45: // ['-','-'] + return 35 + case r == 46: // ['.','.'] + return 6 + case 48 <= r && r <= 57: // ['0','9'] + 
return 8 + case r == 62: // ['>','>'] + return 36 + } + return NoState + }, + // S6 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 37 + } + return NoState + }, + // S7 + func(r rune) int { + switch { + case r == 42: // ['*','*'] + return 38 + case r == 47: // ['/','/'] + return 39 + } + return NoState + }, + // S8 + func(r rune) int { + switch { + case r == 46: // ['.','.'] + return 40 + case 48 <= r && r <= 57: // ['0','9'] + return 8 + } + return NoState + }, + // S9 + func(r rune) int { + switch { + } + return NoState + }, + // S10 + func(r rune) int { + switch { + } + return NoState + }, + // S11 + func(r rune) int { + switch { + case 1 <= r && r <= 59: // [\u0001,';'] + return 41 + case r == 60: // ['<','<'] + return 42 + case r == 61: // ['=','='] + return 41 + case r == 62: // ['>','>'] + return 43 + case 63 <= r && r <= 255: // ['?',\u00ff] + return 41 + } + return NoState + }, + // S12 + func(r rune) int { + switch { + } + return NoState + }, + // S13 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S14 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 72: // ['A','H'] + return 13 + case r == 73: // ['I','I'] + return 45 + case 74 <= r && r <= 90: // ['J','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 104: // ['a','h'] + return 13 + case r == 105: // ['i','i'] + return 46 + case 106 <= r && r <= 122: // ['j','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S15 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 67: // ['A','C'] + return 13 + case r == 68: // ['D','D'] + return 47 + case 69 <= r && r <= 90: // ['E','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 99: // ['a','c'] + return 13 + case r == 100: // ['d','d'] + return 48 + case 101 <= r && r <= 122: // ['e','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S16 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 81: // ['A','Q'] + return 13 + case r == 82: // ['R','R'] + return 49 + case 83 <= r && r <= 90: // ['S','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 113: // ['a','q'] + return 13 + case r == 114: // ['r','r'] + return 50 + case 115 <= r && r <= 122: // ['s','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S17 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 78: // ['A','N'] + return 13 + case r == 79: // ['O','O'] + return 51 + case 80 <= r && r <= 90: // ['P','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 110: // ['a','n'] + return 13 + case r == 111: // ['o','o'] + return 52 + case 112 <= r && r <= 
122: // ['p','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S18 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 83: // ['A','S'] + return 13 + case r == 84: // ['T','T'] + return 53 + case r == 85: // ['U','U'] + return 54 + case 86 <= r && r <= 90: // ['V','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 115: // ['a','s'] + return 13 + case r == 116: // ['t','t'] + return 55 + case r == 117: // ['u','u'] + return 56 + case 118 <= r && r <= 122: // ['v','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S19 + func(r rune) int { + switch { + } + return NoState + }, + // S20 + func(r rune) int { + switch { + } + return NoState + }, + // S21 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S22 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 104: // ['a','h'] + return 13 + case r == 105: // ['i','i'] + return 57 + case 106 <= r && r <= 122: // ['j','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S23 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 99: // ['a','c'] + return 13 + case r == 100: // ['d','d'] + return 58 + case 101 <= r && r <= 122: // ['e','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S24 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 113: // ['a','q'] + return 13 + case r == 114: // ['r','r'] + return 59 + case 115 <= r && r <= 122: // ['s','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S25 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 110: // ['a','n'] + return 13 + case r == 111: // ['o','o'] + return 60 + case 112 <= r && r <= 122: // ['p','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S26 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 
<= r && r <= 115: // ['a','s'] + return 13 + case r == 116: // ['t','t'] + return 61 + case r == 117: // ['u','u'] + return 62 + case 118 <= r && r <= 122: // ['v','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S27 + func(r rune) int { + switch { + } + return NoState + }, + // S28 + func(r rune) int { + switch { + } + return NoState + }, + // S29 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S30 + func(r rune) int { + switch { + case 1 <= r && r <= 33: // [\u0001,'!'] + return 30 + case r == 34: // ['"','"'] + return 31 + case 35 <= r && r <= 91: // ['#','['] + return 30 + case r == 92: // ['\','\'] + return 32 + case 93 <= r && r <= 127: // [']',\u007f] + return 30 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 33 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 33 + } + return NoState + }, + // S31 + func(r rune) int { + switch { + } + return NoState + }, + // S32 + func(r rune) int { + switch { + case 1 <= r && r <= 33: // [\u0001,'!'] + return 63 + case r == 34: // ['"','"'] + return 64 + case 35 <= r && r <= 91: // ['#','['] + return 63 + case r == 92: // ['\','\'] + return 64 + case 93 <= r && r <= 127: // [']',\u007f] + return 63 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 65 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 65 + } + return NoState + }, + // S33 + func(r rune) int { + switch { + case 1 <= r && r <= 33: // [\u0001,'!'] + return 30 + case r == 34: // ['"','"'] + return 31 + case 35 <= r && r <= 91: // ['#','['] + return 30 + case r == 92: // ['\','\'] + return 32 + case 93 <= r && r <= 127: // [']',\u007f] + return 30 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 33 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 33 + } + return NoState + }, + // S34 + func(r rune) int { + switch { + } + return NoState + }, + // S35 + func(r rune) int { + switch { + } + return NoState + }, + // S36 + func(r rune) int { + switch { + } + return NoState + }, + // S37 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 37 + } + return NoState + }, + // S38 + func(r rune) int { + switch { + case r == 42: // ['*','*'] + return 66 + default: + return 38 + } + }, + // S39 + func(r rune) int { + switch { + case r == 10: // ['\n','\n'] + return 34 + default: + return 39 + } + }, + // S40 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 67 + } + return NoState + }, + // S41 + func(r rune) int { + switch { + case 1 <= r && r <= 59: // [\u0001,';'] + return 41 + case r == 60: // ['<','<'] + return 42 + case r == 61: // ['=','='] + return 41 + case r == 62: // ['>','>'] + return 43 + case 63 <= r && r <= 255: // ['?',\u00ff] + return 41 + } + return NoState + }, + // S42 + func(r rune) int { + switch { + case 1 <= r && r <= 59: // [\u0001,';'] + return 68 + case r == 61: // ['=','='] + return 68 + case 63 <= r && r <= 255: // ['?',\u00ff] + return 68 + } + return NoState + }, + // S43 + func(r rune) int { + switch { + } + return NoState + }, + // S44 + func(r rune) int { + 
switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S45 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 70: // ['A','F'] + return 13 + case r == 71: // ['G','G'] + return 69 + case 72 <= r && r <= 90: // ['H','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S46 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 70: // ['A','F'] + return 13 + case r == 71: // ['G','G'] + return 70 + case 72 <= r && r <= 90: // ['H','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 102: // ['a','f'] + return 13 + case r == 103: // ['g','g'] + return 71 + case 104 <= r && r <= 122: // ['h','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S47 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 70: // ['A','F'] + return 13 + case r == 71: // ['G','G'] + return 72 + case 72 <= r && r <= 90: // ['H','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S48 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 102: // ['a','f'] + return 13 + case r == 103: // ['g','g'] + return 73 + case 104 <= r && r <= 122: // ['h','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S49 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case r == 65: // ['A','A'] + return 74 + case 66 <= r && r <= 90: // ['B','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S50 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case r == 97: // ['a','a'] + return 75 + case 98 <= r && r <= 122: // ['b','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S51 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 67: // ['A','C'] + return 13 + case r == 68: // ['D','D'] + return 76 + case 69 <= r && r <= 90: // ['E','Z'] + return 13 + case r == 95: 
// ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S52 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 99: // ['a','c'] + return 13 + case r == 100: // ['d','d'] + return 77 + case 101 <= r && r <= 122: // ['e','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S53 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 81: // ['A','Q'] + return 13 + case r == 82: // ['R','R'] + return 78 + case 83 <= r && r <= 90: // ['S','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S54 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case r == 65: // ['A','A'] + return 13 + case r == 66: // ['B','B'] + return 79 + case 67 <= r && r <= 90: // ['C','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S55 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 113: // ['a','q'] + return 13 + case r == 114: // ['r','r'] + return 80 + case 115 <= r && r <= 122: // ['s','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S56 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case r == 97: // ['a','a'] + return 13 + case r == 98: // ['b','b'] + return 81 + case 99 <= r && r <= 122: // ['c','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S57 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 70: // ['A','F'] + return 13 + case r == 71: // ['G','G'] + return 82 + case 72 <= r && r <= 90: // ['H','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 102: // ['a','f'] + return 13 + case r == 103: // ['g','g'] + return 83 + case 104 <= r && r <= 122: // ['h','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S58 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 102: // ['a','f'] + return 13 + case r == 103: // ['g','g'] + return 84 + case 104 <= r && 
r <= 122: // ['h','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S59 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case r == 97: // ['a','a'] + return 85 + case 98 <= r && r <= 122: // ['b','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S60 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 99: // ['a','c'] + return 13 + case r == 100: // ['d','d'] + return 86 + case 101 <= r && r <= 122: // ['e','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S61 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 113: // ['a','q'] + return 13 + case r == 114: // ['r','r'] + return 87 + case 115 <= r && r <= 122: // ['s','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S62 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case r == 97: // ['a','a'] + return 13 + case r == 98: // ['b','b'] + return 88 + case 99 <= r && r <= 122: // ['c','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S63 + func(r rune) int { + switch { + case 1 <= r && r <= 33: // [\u0001,'!'] + return 30 + case r == 34: // ['"','"'] + return 31 + case 35 <= r && r <= 91: // ['#','['] + return 30 + case r == 92: // ['\','\'] + return 32 + case 93 <= r && r <= 127: // [']',\u007f] + return 30 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 33 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 33 + } + return NoState + }, + // S64 + func(r rune) int { + switch { + case 1 <= r && r <= 33: // [\u0001,'!'] + return 30 + case r == 34: // ['"','"'] + return 31 + case 35 <= r && r <= 91: // ['#','['] + return 30 + case r == 92: // ['\','\'] + return 32 + case 93 <= r && r <= 127: // [']',\u007f] + return 30 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 33 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 33 + } + return NoState + }, + // S65 + func(r rune) int { + switch { + case 1 <= r && r <= 33: // [\u0001,'!'] + return 30 + case r == 34: // ['"','"'] + return 31 + case 35 <= r && r <= 91: // ['#','['] + return 30 + case r == 92: // ['\','\'] + return 32 + case 93 <= r && r <= 127: // [']',\u007f] + return 30 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 33 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 33 + } + return NoState + }, + // S66 + func(r rune) int { + switch { + case r == 42: // ['*','*'] + return 66 + case r == 47: // ['/','/'] + return 89 + 
default: + return 38 + } + }, + // S67 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 67 + } + return NoState + }, + // S68 + func(r rune) int { + switch { + case 1 <= r && r <= 59: // [\u0001,';'] + return 68 + case r == 61: // ['=','='] + return 68 + case r == 62: // ['>','>'] + return 90 + case 63 <= r && r <= 255: // ['?',\u00ff] + return 68 + } + return NoState + }, + // S69 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 81: // ['A','Q'] + return 13 + case r == 82: // ['R','R'] + return 91 + case 83 <= r && r <= 90: // ['S','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S70 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 113: // ['a','q'] + return 13 + case r == 114: // ['r','r'] + return 92 + case 115 <= r && r <= 122: // ['s','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S71 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 113: // ['a','q'] + return 13 + case r == 114: // ['r','r'] + return 93 + case 115 <= r && r <= 122: // ['s','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S72 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 68: // ['A','D'] + return 13 + case r == 69: // ['E','E'] + return 94 + case 70 <= r && r <= 90: // ['F','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S73 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 100: // ['a','d'] + return 13 + case r == 101: // ['e','e'] + return 94 + case 102 <= r && r <= 122: // ['f','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S74 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 79: // ['A','O'] + return 13 + case r == 80: // ['P','P'] + return 95 + case 81 <= r && r <= 90: // ['Q','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S75 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 
<= r && r <= 111: // ['a','o'] + return 13 + case r == 112: // ['p','p'] + return 96 + case 113 <= r && r <= 122: // ['q','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S76 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 68: // ['A','D'] + return 13 + case r == 69: // ['E','E'] + return 97 + case 70 <= r && r <= 90: // ['F','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S77 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 100: // ['a','d'] + return 13 + case r == 101: // ['e','e'] + return 97 + case 102 <= r && r <= 122: // ['f','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S78 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 72: // ['A','H'] + return 13 + case r == 73: // ['I','I'] + return 98 + case 74 <= r && r <= 90: // ['J','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S79 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 70: // ['A','F'] + return 13 + case r == 71: // ['G','G'] + return 99 + case 72 <= r && r <= 90: // ['H','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S80 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 104: // ['a','h'] + return 13 + case r == 105: // ['i','i'] + return 100 + case 106 <= r && r <= 122: // ['j','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S81 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 70: // ['A','F'] + return 13 + case r == 71: // ['G','G'] + return 101 + case 72 <= r && r <= 90: // ['H','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 102: // ['a','f'] + return 13 + case r == 103: // ['g','g'] + return 102 + case 104 <= r && r <= 122: // ['h','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S82 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 
113: // ['a','q'] + return 13 + case r == 114: // ['r','r'] + return 103 + case 115 <= r && r <= 122: // ['s','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S83 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 113: // ['a','q'] + return 13 + case r == 114: // ['r','r'] + return 104 + case 115 <= r && r <= 122: // ['s','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S84 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 100: // ['a','d'] + return 13 + case r == 101: // ['e','e'] + return 94 + case 102 <= r && r <= 122: // ['f','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S85 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 111: // ['a','o'] + return 13 + case r == 112: // ['p','p'] + return 105 + case 113 <= r && r <= 122: // ['q','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S86 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 100: // ['a','d'] + return 13 + case r == 101: // ['e','e'] + return 97 + case 102 <= r && r <= 122: // ['f','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S87 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 104: // ['a','h'] + return 13 + case r == 105: // ['i','i'] + return 106 + case 106 <= r && r <= 122: // ['j','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S88 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 70: // ['A','F'] + return 13 + case r == 71: // ['G','G'] + return 107 + case 72 <= r && r <= 90: // ['H','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 102: // ['a','f'] + return 13 + case r == 103: // ['g','g'] + return 108 + case 104 <= r && r <= 122: // ['h','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S89 + func(r rune) int { + switch { + } + return NoState + }, + // S90 + func(r rune) int { + switch { + case 1 <= r && r <= 59: // [\u0001,';'] + return 41 + case r == 60: // ['<','<'] + return 42 + case r 
== 61: // ['=','='] + return 41 + case r == 62: // ['>','>'] + return 43 + case 63 <= r && r <= 255: // ['?',\u00ff] + return 41 + } + return NoState + }, + // S91 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case r == 65: // ['A','A'] + return 109 + case 66 <= r && r <= 90: // ['B','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S92 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case r == 97: // ['a','a'] + return 110 + case 98 <= r && r <= 122: // ['b','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S93 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case r == 97: // ['a','a'] + return 111 + case 98 <= r && r <= 122: // ['b','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S94 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S95 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 71: // ['A','G'] + return 13 + case r == 72: // ['H','H'] + return 112 + case 73 <= r && r <= 90: // ['I','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S96 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 103: // ['a','g'] + return 13 + case r == 104: // ['h','h'] + return 112 + case 105 <= r && r <= 122: // ['i','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S97 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S98 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 66: // ['A','B'] + return 13 + case r == 67: // ['C','C'] + return 113 + case 68 <= r && r <= 90: // ['D','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 
122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S99 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 81: // ['A','Q'] + return 13 + case r == 82: // ['R','R'] + return 114 + case 83 <= r && r <= 90: // ['S','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S100 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 98: // ['a','b'] + return 13 + case r == 99: // ['c','c'] + return 115 + case 100 <= r && r <= 122: // ['d','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S101 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 113: // ['a','q'] + return 13 + case r == 114: // ['r','r'] + return 116 + case 115 <= r && r <= 122: // ['s','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S102 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 113: // ['a','q'] + return 13 + case r == 114: // ['r','r'] + return 117 + case 115 <= r && r <= 122: // ['s','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S103 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case r == 97: // ['a','a'] + return 118 + case 98 <= r && r <= 122: // ['b','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S104 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case r == 97: // ['a','a'] + return 119 + case 98 <= r && r <= 122: // ['b','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S105 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 103: // ['a','g'] + return 13 + case r == 104: // ['h','h'] + return 112 + case 105 <= r && r <= 122: // ['i','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S106 + 
func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 98: // ['a','b'] + return 13 + case r == 99: // ['c','c'] + return 120 + case 100 <= r && r <= 122: // ['d','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S107 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 113: // ['a','q'] + return 13 + case r == 114: // ['r','r'] + return 121 + case 115 <= r && r <= 122: // ['s','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S108 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 113: // ['a','q'] + return 13 + case r == 114: // ['r','r'] + return 122 + case 115 <= r && r <= 122: // ['s','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S109 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 79: // ['A','O'] + return 13 + case r == 80: // ['P','P'] + return 123 + case 81 <= r && r <= 90: // ['Q','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S110 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 111: // ['a','o'] + return 13 + case r == 112: // ['p','p'] + return 124 + case 113 <= r && r <= 122: // ['q','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S111 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 111: // ['a','o'] + return 13 + case r == 112: // ['p','p'] + return 125 + case 113 <= r && r <= 122: // ['q','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S112 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S113 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 83: // ['A','S'] + return 13 + case r == 84: // ['T','T'] + return 126 + case 85 <= r 
&& r <= 90: // ['U','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S114 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case r == 65: // ['A','A'] + return 127 + case 66 <= r && r <= 90: // ['B','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S115 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 115: // ['a','s'] + return 13 + case r == 116: // ['t','t'] + return 126 + case 117 <= r && r <= 122: // ['u','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S116 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case r == 97: // ['a','a'] + return 128 + case 98 <= r && r <= 122: // ['b','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S117 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case r == 97: // ['a','a'] + return 129 + case 98 <= r && r <= 122: // ['b','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S118 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 111: // ['a','o'] + return 13 + case r == 112: // ['p','p'] + return 130 + case 113 <= r && r <= 122: // ['q','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S119 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 111: // ['a','o'] + return 13 + case r == 112: // ['p','p'] + return 131 + case 113 <= r && r <= 122: // ['q','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S120 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 115: // ['a','s'] + return 13 + case r == 116: // ['t','t'] + return 126 + case 117 <= r && r <= 122: // ['u','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + 
return 29 + } + return NoState + }, + // S121 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case r == 97: // ['a','a'] + return 132 + case 98 <= r && r <= 122: // ['b','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S122 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case r == 97: // ['a','a'] + return 133 + case 98 <= r && r <= 122: // ['b','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S123 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 71: // ['A','G'] + return 13 + case r == 72: // ['H','H'] + return 134 + case 73 <= r && r <= 90: // ['I','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S124 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 103: // ['a','g'] + return 13 + case r == 104: // ['h','h'] + return 134 + case 105 <= r && r <= 122: // ['i','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S125 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 103: // ['a','g'] + return 13 + case r == 104: // ['h','h'] + return 134 + case 105 <= r && r <= 122: // ['i','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S126 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S127 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 79: // ['A','O'] + return 13 + case r == 80: // ['P','P'] + return 135 + case 81 <= r && r <= 90: // ['Q','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S128 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 111: // ['a','o'] + return 13 + case r == 112: // 
['p','p'] + return 136 + case 113 <= r && r <= 122: // ['q','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S129 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 111: // ['a','o'] + return 13 + case r == 112: // ['p','p'] + return 137 + case 113 <= r && r <= 122: // ['q','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S130 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 103: // ['a','g'] + return 13 + case r == 104: // ['h','h'] + return 134 + case 105 <= r && r <= 122: // ['i','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S131 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 103: // ['a','g'] + return 13 + case r == 104: // ['h','h'] + return 134 + case 105 <= r && r <= 122: // ['i','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S132 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 111: // ['a','o'] + return 13 + case r == 112: // ['p','p'] + return 138 + case 113 <= r && r <= 122: // ['q','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S133 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 111: // ['a','o'] + return 13 + case r == 112: // ['p','p'] + return 139 + case 113 <= r && r <= 122: // ['q','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S134 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S135 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 71: // ['A','G'] + return 13 + case r == 72: // ['H','H'] + return 140 + case 73 <= r && r <= 90: // ['I','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // 
[\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S136 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 103: // ['a','g'] + return 13 + case r == 104: // ['h','h'] + return 140 + case 105 <= r && r <= 122: // ['i','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S137 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 103: // ['a','g'] + return 13 + case r == 104: // ['h','h'] + return 140 + case 105 <= r && r <= 122: // ['i','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S138 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 103: // ['a','g'] + return 13 + case r == 104: // ['h','h'] + return 140 + case 105 <= r && r <= 122: // ['i','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S139 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 103: // ['a','g'] + return 13 + case r == 104: // ['h','h'] + return 140 + case 105 <= r && r <= 122: // ['i','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, + // S140 + func(r rune) int { + switch { + case 48 <= r && r <= 57: // ['0','9'] + return 44 + case 65 <= r && r <= 90: // ['A','Z'] + return 13 + case r == 95: // ['_','_'] + return 21 + case 97 <= r && r <= 122: // ['a','z'] + return 13 + case 128 <= r && r <= 65532: // [\u0080,\ufffc] + return 29 + case 65534 <= r && r <= 1114111: // [\ufffe,\U0010ffff] + return 29 + } + return NoState + }, +} diff --git a/vendor/github.com/awalterschulze/gographviz/internal/parser/action.go b/vendor/github.com/awalterschulze/gographviz/internal/parser/action.go new file mode 100644 index 0000000..54bc55e --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/internal/parser/action.go @@ -0,0 +1,51 @@ +// Code generated by gocc; DO NOT EDIT. 
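Note: the generated transitiontable.go that ends above encodes the DOT lexer's DFA as a slice of per-state functions, each mapping the next input rune to the index of the following state, or NoState when no transition exists. As a rough, hypothetical illustration of that representation (not part of the vendored code), a two-state toy table for identifiers of the form [a-z][a-z0-9]* could be driven like this; the table contents and the longestMatch helper are invented for the example.

package main

import "fmt"

const noState = -1

// One transition function per state, as in the generated table above:
// each function returns the next state for a rune, or noState.
var table = []func(r rune) int{
	// S0: an identifier must start with a lowercase letter.
	func(r rune) int {
		if 'a' <= r && r <= 'z' {
			return 1
		}
		return noState
	},
	// S1: further letters and digits stay in S1.
	func(r rune) int {
		if ('a' <= r && r <= 'z') || ('0' <= r && r <= '9') {
			return 1
		}
		return noState
	},
}

// longestMatch walks the table from state 0 and reports how many runes
// were consumed before no transition applied.
func longestMatch(input string) int {
	state, consumed := 0, 0
	for _, r := range input {
		next := table[state](r)
		if next == noState {
			break
		}
		state = next
		consumed++
	}
	return consumed
}

func main() {
	fmt.Println(longestMatch("node42;")) // 6: matches "node42"
	fmt.Println(longestMatch("42node"))  // 0: cannot start with a digit
}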
+ +package parser + +import ( + "fmt" +) + +type action interface { + act() + String() string +} + +type ( + accept bool + shift int // value is next state index + reduce int // value is production index +) + +func (this accept) act() {} +func (this shift) act() {} +func (this reduce) act() {} + +func (this accept) Equal(that action) bool { + if _, ok := that.(accept); ok { + return true + } + return false +} + +func (this reduce) Equal(that action) bool { + that1, ok := that.(reduce) + if !ok { + return false + } + return this == that1 +} + +func (this shift) Equal(that action) bool { + that1, ok := that.(shift) + if !ok { + return false + } + return this == that1 +} + +func (this accept) String() string { return "accept(0)" } +func (this shift) String() string { return fmt.Sprintf("shift:%d", this) } +func (this reduce) String() string { + return fmt.Sprintf("reduce:%d(%s)", this, productionsTable[this].String) +} diff --git a/vendor/github.com/awalterschulze/gographviz/internal/parser/actiontable.go b/vendor/github.com/awalterschulze/gographviz/internal/parser/actiontable.go new file mode 100644 index 0000000..2a8629f --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/internal/parser/actiontable.go @@ -0,0 +1,152 @@ +// Code generated by gocc; DO NOT EDIT. + +package parser + +import ( + "bytes" + "compress/gzip" + "encoding/gob" +) + +type ( + actionTable [numStates]actionRow + actionRow struct { + canRecover bool + actions [numSymbols]action + } +) + +var actionTab = actionTable{} + +func init() { + tab := []struct { + CanRecover bool + Actions []struct { + Index int + Action int + Amount int + } + }{} + data := []byte{ + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x9c, 0x97, 0x4f, 0x88, 0x5b, 0x55, + 0x1b, 0xc6, 0xcf, 0x7b, 0xe7, 0x7e, 0xd3, 0xf9, 0x66, 0xa6, 0xa5, 0x0c, 0xf3, 0x85, 0x61, 0x18, + 0x86, 0x10, 0xc2, 0x10, 0x42, 0x08, 0x21, 0x0c, 0x21, 0xe4, 0x8b, 0x21, 0x8e, 0x65, 0xd0, 0x21, + 0x84, 0x10, 0x42, 0x08, 0x31, 0x86, 0x9a, 0xa6, 0x21, 0x0d, 0xf1, 0x36, 0x4c, 0xd3, 0xa1, 0xe0, + 0x1f, 0x6a, 0xad, 0xda, 0x95, 0xb8, 0x70, 0xe1, 0xca, 0xa5, 0x0b, 0x17, 0x22, 0xe2, 0x42, 0x5c, + 0x88, 0x0b, 0xe9, 0x42, 0x5c, 0xba, 0x10, 0x17, 0x22, 0x2e, 0x5c, 0x89, 0xb8, 0x10, 0x11, 0x17, + 0xbd, 0x72, 0xcf, 0x2f, 0x3d, 0x99, 0xd4, 0xc9, 0xe4, 0x26, 0x74, 0x71, 0x6e, 0x4e, 0xef, 0xf3, + 0x7b, 0x9e, 0x73, 0xce, 0x7b, 0xce, 0x3d, 0x73, 0xd1, 0x7d, 0xdb, 0x12, 0xcb, 0x7d, 0xa0, 0xc4, + 0xbd, 0xa7, 0x54, 0xc4, 0x7d, 0x7d, 0x49, 0x2c, 0xf7, 0x9e, 0x12, 0x4b, 0x56, 0x9f, 0x79, 0xd1, + 0x29, 0x5f, 0xbf, 0x76, 0xf3, 0xe4, 0xfa, 0xb1, 0x58, 0x4a, 0x2e, 0x3c, 0x7d, 0x6d, 0x78, 0xe3, + 0xa6, 0x73, 0x4b, 0xdc, 0xb7, 0x94, 0x52, 0x4f, 0xb9, 0x6f, 0x5a, 0x22, 0xf1, 0xe7, 0x5f, 0xb8, + 0x35, 0x3c, 0xbe, 0x7d, 0x6d, 0x18, 0x7c, 0x39, 0xf8, 0x9c, 0xd3, 0xbe, 0x7e, 0x27, 0x78, 0xc3, + 0x19, 0xfe, 0x3f, 0xc8, 0x9b, 0xa3, 0xe7, 0x97, 0x6e, 0xde, 0x76, 0x86, 0xde, 0x73, 0xf0, 0x55, + 0x4f, 0x2a, 0xee, 0x7d, 0xa5, 0x62, 0xee, 0x1b, 0x9e, 0xcd, 0x7d, 0x25, 0x4b, 0xf2, 0x1f, 0x2d, + 0x14, 0x5b, 0xc9, 0x32, 0x3a, 0x1e, 0xb5, 0x4c, 0x6c, 0xa5, 0xd4, 0xa3, 0xff, 0x3d, 0x74, 0x1f, + 0x28, 0xf7, 0xae, 0xb5, 0x24, 0xb6, 0xf7, 0x4f, 0xc9, 0xaa, 0xd8, 0xb2, 0xac, 0x64, 0x5d, 0x6c, + 0x59, 0x51, 0xca, 0x12, 0xb1, 0x94, 0xb2, 0x2c, 0x59, 0x16, 0x5b, 0x56, 0x95, 0x84, 0xc5, 0x96, + 0x4b, 0xba, 0xc3, 0x7b, 0xfd, 0x32, 0xef, 0x6d, 0x98, 0x37, 0x36, 0xc7, 0x6f, 0x5c, 0xd0, 0x6f, + 0x6c, 0x29, 0xdd, 0xbf, 0xad, 0x64, 0x45, 0x6c, 0xd9, 0x51, 0xb2, 0x21, 0xb6, 0xc4, 0x95, 0x6c, + 0x8a, 0x2d, 0x09, 0x25, 
0xbb, 0x62, 0xcb, 0x3e, 0x9a, 0x94, 0x36, 0xf3, 0x5e, 0x4e, 0x8f, 0x9e, + 0x2c, 0x39, 0x31, 0xe0, 0xcc, 0x69, 0x6b, 0xaf, 0x23, 0x77, 0xbe, 0xd3, 0x81, 0x2f, 0xa7, 0x43, + 0xfd, 0x14, 0x10, 0x5b, 0x8e, 0x94, 0xb2, 0x96, 0x9f, 0xe0, 0xcc, 0x00, 0x58, 0xde, 0x3f, 0xa5, + 0xac, 0x8b, 0x62, 0x8b, 0x25, 0x57, 0x95, 0x4e, 0x7c, 0xd5, 0xb3, 0xd7, 0xcd, 0x25, 0x9a, 0xcb, + 0x62, 0x4b, 0xd1, 0x63, 0xe9, 0x5f, 0x9b, 0x34, 0x01, 0x9a, 0x1d, 0xb1, 0xa5, 0xec, 0x91, 0xf5, + 0xaf, 0x20, 0x4d, 0x88, 0x26, 0xac, 0x9b, 0xb3, 0x07, 0x57, 0x99, 0x91, 0xed, 0x82, 0x4e, 0x14, + 0x22, 0x51, 0x88, 0x44, 0x21, 0x32, 0x84, 0xc8, 0x10, 0xc2, 0x35, 0x84, 0x4f, 0x48, 0x29, 0x6b, + 0x45, 0x6b, 0xf6, 0xd0, 0xec, 0xa1, 0xd9, 0xf3, 0x46, 0x61, 0x4b, 0x0d, 0xe9, 0x1e, 0xd2, 0x3d, + 0xa4, 0x7b, 0x48, 0xf7, 0x8c, 0x34, 0x86, 0x34, 0x86, 0x34, 0xc6, 0x04, 0xc4, 0x90, 0xc6, 0x90, + 0xc6, 0x90, 0xc6, 0x90, 0xc6, 0x8c, 0x34, 0x8e, 0x34, 0x8e, 0x34, 0x8e, 0x34, 0x8e, 0x34, 0x8e, + 0x34, 0x8e, 0x34, 0x8e, 0x34, 0x6e, 0xa4, 0x09, 0xa4, 0x09, 0xa4, 0x09, 0xa4, 0x09, 0xa4, 0x09, + 0xa4, 0x09, 0xa4, 0x09, 0xa4, 0x09, 0xa5, 0xac, 0x55, 0x2d, 0x4d, 0x22, 0x4d, 0x22, 0x4d, 0x22, + 0x4d, 0x22, 0x4d, 0x22, 0x4d, 0x22, 0x4d, 0x7a, 0x8b, 0x63, 0x4b, 0xd3, 0x5b, 0x1c, 0xfb, 0xf1, + 0xe2, 0x24, 0x27, 0x6a, 0x67, 0xfc, 0xb4, 0xa6, 0xe1, 0x0d, 0xe0, 0x0d, 0xe0, 0x0d, 0xe0, 0x0d, + 0xe0, 0x0d, 0xe0, 0x0d, 0x35, 0xd2, 0x68, 0x8f, 0xc6, 0x59, 0x1e, 0x0d, 0x53, 0xee, 0xbd, 0x71, + 0xb9, 0x53, 0x6f, 0x27, 0x6a, 0xb4, 0x43, 0xb4, 0xc1, 0x09, 0x06, 0x27, 0x5e, 0xbd, 0xe9, 0x66, + 0x83, 0x66, 0x93, 0x26, 0x40, 0xb3, 0x43, 0xb3, 0x4b, 0x13, 0xa4, 0x09, 0xd1, 0x84, 0x47, 0xdb, + 0xed, 0xac, 0x7a, 0x73, 0x7c, 0xd4, 0xdb, 0x93, 0x9a, 0x63, 0x5f, 0x1b, 0x70, 0x9a, 0xe3, 0x1d, + 0x5f, 0xea, 0x57, 0xcc, 0x3e, 0xdc, 0x98, 0xc2, 0x79, 0x6d, 0x81, 0xe4, 0xee, 0xdd, 0x19, 0xa2, + 0xff, 0xea, 0x05, 0xd8, 0x67, 0x01, 0xf6, 0x59, 0x80, 0x7d, 0x16, 0x60, 0x9f, 0x99, 0xdf, 0x67, + 0xe6, 0xf7, 0x59, 0x61, 0xef, 0x28, 0xde, 0xe5, 0x67, 0x58, 0x37, 0xde, 0xa2, 0x6e, 0x79, 0xff, + 0xf1, 0x00, 0xa8, 0xfb, 0xce, 0xb4, 0x28, 0xef, 0xce, 0x9c, 0x07, 0xad, 0x7f, 0xcf, 0xd4, 0x5d, + 0x8b, 0x54, 0x2d, 0x52, 0xb5, 0x48, 0xd5, 0x22, 0x55, 0x8b, 0x54, 0x2d, 0xea, 0xa1, 0x45, 0xa8, + 0x16, 0x85, 0xd0, 0xa2, 0x10, 0x5a, 0x44, 0x6c, 0x8d, 0xd1, 0xef, 0x9b, 0x39, 0x5e, 0x35, 0x27, + 0x4b, 0x18, 0x93, 0x30, 0x26, 0x61, 0xe8, 0x61, 0xe8, 0x61, 0xb0, 0x61, 0x40, 0x61, 0xa3, 0x89, + 0xa0, 0x89, 0xa0, 0x89, 0xa0, 0x89, 0xa0, 0x89, 0xa0, 0x89, 0xa0, 0x89, 0x98, 0xc1, 0x94, 0xd0, + 0x94, 0xd0, 0x94, 0x18, 0x4c, 0x09, 0x69, 0x09, 0x69, 0x69, 0x62, 0x13, 0x95, 0xce, 0xda, 0x44, + 0x25, 0xa5, 0xac, 0x25, 0x3d, 0xab, 0xee, 0x07, 0x4c, 0xa0, 0xfb, 0xa1, 0x1a, 0x0f, 0x6e, 0x49, + 0x7b, 0x1c, 0x03, 0x38, 0x46, 0x72, 0x6c, 0xfa, 0x87, 0xf4, 0x0f, 0xe9, 0x1f, 0x9a, 0xd5, 0x4f, + 0x11, 0x2d, 0x45, 0xb4, 0x14, 0xd1, 0x52, 0x44, 0x4b, 0x11, 0x2d, 0x35, 0xb9, 0xfa, 0x29, 0x10, + 0xe3, 0x02, 0x4a, 0x83, 0x48, 0x83, 0x48, 0x83, 0x48, 0x83, 0x48, 0x83, 0x48, 0x4f, 0x22, 0xd2, + 0x20, 0xd2, 0x06, 0xd1, 0x04, 0xd1, 0x04, 0xd1, 0x04, 0xd1, 0x04, 0xd1, 0x04, 0xd1, 0x9c, 0x44, + 0x34, 0x41, 0x34, 0xcd, 0x1c, 0x17, 0x40, 0x14, 0x40, 0x14, 0x40, 0x14, 0x40, 0x14, 0x40, 0x14, + 0x26, 0xe6, 0xb8, 0x70, 0xd6, 0x1c, 0x17, 0xa6, 0x15, 0xf0, 0x47, 0xbe, 0x36, 0xb2, 0xfb, 0xb1, + 0xa9, 0xb2, 0xe5, 0x69, 0xa4, 0x4f, 0x7c, 0x7d, 0x90, 0xed, 0x69, 0xf2, 0x4f, 0x17, 0x39, 0x09, + 0x3e, 0xf3, 0xe5, 0xb9, 0x39, 0x4d, 0xfe, 0xf9, 0x22, 0x9e, 0x5f, 0xf8, 0xf2, 0xdc, 0x36, 0x4f, + 0x81, 0x69, 0xa0, 0x2f, 0x67, 0x80, 0x46, 0x67, 
0xd0, 0x57, 0xa7, 0xce, 0x20, 0x5b, 0x5f, 0x57, + 0xdc, 0xaf, 0x95, 0x6c, 0x89, 0x25, 0xcf, 0x2a, 0xd9, 0xa6, 0x09, 0xeb, 0xe6, 0x71, 0xd5, 0x65, + 0x28, 0x99, 0x0c, 0x25, 0x93, 0xa1, 0x64, 0x32, 0x94, 0x4c, 0x86, 0x92, 0xc9, 0x70, 0xc6, 0x64, + 0x28, 0x99, 0x0c, 0x84, 0x8c, 0xde, 0x55, 0xda, 0xf4, 0xa1, 0x87, 0xb6, 0xc5, 0xfd, 0xe6, 0x94, + 0xf9, 0x92, 0x36, 0x3d, 0xc0, 0xf4, 0x00, 0xc9, 0xc1, 0x28, 0x94, 0xfe, 0x44, 0x6d, 0xd1, 0x6c, + 0x4f, 0x7e, 0xb0, 0xf8, 0x98, 0x77, 0xc9, 0xd4, 0x25, 0x53, 0x97, 0x4c, 0x5d, 0x32, 0x75, 0xc9, + 0xd4, 0x25, 0x4c, 0x97, 0x03, 0xaf, 0xcb, 0x81, 0xd7, 0x05, 0xd4, 0x35, 0x17, 0x8a, 0x28, 0xa0, + 0x28, 0xa0, 0x28, 0xa0, 0x28, 0xa0, 0x28, 0xa0, 0x28, 0xa0, 0x28, 0xd2, 0xa8, 0x91, 0x4e, 0xfb, + 0x24, 0x4f, 0x7e, 0x8b, 0x77, 0x27, 0xe3, 0xaf, 0x6b, 0x69, 0x1b, 0x69, 0x1b, 0x69, 0x1b, 0x69, + 0x1b, 0x69, 0x1b, 0x69, 0x9b, 0x29, 0x6d, 0x73, 0x6d, 0x74, 0xbf, 0x85, 0xd4, 0x66, 0x34, 0x6d, + 0x46, 0xd3, 0x86, 0xdb, 0x36, 0x5c, 0x7f, 0x91, 0xe6, 0xb8, 0x1e, 0x50, 0x01, 0x65, 0xb0, 0x65, + 0xb0, 0x65, 0xb0, 0x65, 0xb0, 0x65, 0xb0, 0xe5, 0xc9, 0x73, 0xa7, 0x0c, 0xa2, 0x7c, 0xfe, 0x59, + 0x3c, 0xd7, 0x0d, 0x7c, 0xfd, 0xdc, 0x7b, 0xf7, 0xc2, 0x17, 0x6e, 0x8e, 0xc5, 0x2a, 0xd8, 0x2a, + 0xd8, 0x2a, 0xd8, 0x2a, 0xd8, 0x2a, 0xd8, 0x2a, 0xd8, 0x2a, 0xbc, 0x2a, 0xbc, 0x2a, 0xbc, 0x2a, + 0xbc, 0xaa, 0xe1, 0x55, 0xe0, 0x55, 0xe0, 0x55, 0xe0, 0x55, 0xe0, 0x55, 0xe0, 0x55, 0xe0, 0x55, + 0xe0, 0x55, 0xe0, 0x55, 0xe0, 0x55, 0xe0, 0x55, 0xcc, 0x7d, 0xd0, 0xfd, 0x6e, 0x7c, 0x21, 0x64, + 0x4d, 0x8a, 0x38, 0x14, 0x71, 0x28, 0xe2, 0x30, 0xfa, 0xcb, 0xa3, 0x88, 0x43, 0x71, 0x72, 0x4d, + 0x8a, 0x40, 0x8b, 0x66, 0x13, 0x39, 0x20, 0x1c, 0x10, 0x0e, 0x08, 0x07, 0x84, 0x03, 0xc2, 0x41, + 0xea, 0x90, 0xce, 0x21, 0x9d, 0x03, 0xc8, 0x99, 0x76, 0x12, 0x7d, 0xbf, 0xc8, 0x39, 0xf8, 0x83, + 0xaf, 0x73, 0x70, 0xdd, 0x3c, 0x5d, 0x32, 0x4f, 0x2b, 0xd3, 0x90, 0x3f, 0xfa, 0x42, 0xee, 0x9a, + 0xa7, 0xad, 0x69, 0xa0, 0x9f, 0x7c, 0x81, 0x76, 0xcc, 0xda, 0xe4, 0x98, 0xd8, 0x1c, 0x13, 0x9b, + 0x63, 0x62, 0x73, 0x4c, 0x6c, 0x8e, 0x89, 0xcd, 0xb1, 0xfa, 0x39, 0xe6, 0x37, 0xc7, 0x8c, 0xe6, + 0xc6, 0x27, 0xe6, 0xcf, 0x67, 0x9c, 0x98, 0xa3, 0x2b, 0xda, 0x2f, 0xc6, 0x27, 0x8b, 0x4f, 0x16, + 0x9f, 0x2c, 0x3e, 0x59, 0x7c, 0xb2, 0xf8, 0x64, 0xf1, 0xc9, 0xe2, 0x93, 0xc5, 0x27, 0x6b, 0x8e, + 0xdf, 0x2b, 0x9c, 0xaf, 0x57, 0xe8, 0xbf, 0x32, 0x36, 0x39, 0xe5, 0xf7, 0xab, 0xa9, 0xea, 0x3a, + 0x7e, 0x75, 0xfc, 0xea, 0xf8, 0xd5, 0xf1, 0xab, 0xe3, 0x57, 0xc7, 0xaf, 0x8e, 0x5f, 0x9d, 0xba, + 0xa9, 0x53, 0x37, 0x75, 0x5c, 0xea, 0x86, 0x57, 0x83, 0x57, 0x83, 0x57, 0x83, 0x37, 0xfa, 0xf3, + 0xb3, 0x06, 0xaf, 0x06, 0xaf, 0x06, 0xaf, 0x06, 0xaf, 0x06, 0xaf, 0x06, 0xaf, 0x36, 0x6d, 0xd9, + 0x7e, 0x5b, 0xa4, 0x0e, 0x7f, 0xf7, 0x77, 0x83, 0xf9, 0xc3, 0xec, 0xa2, 0x1e, 0x83, 0xe8, 0x31, + 0x88, 0x1e, 0x83, 0xe8, 0x31, 0x88, 0x1e, 0x83, 0xe8, 0x91, 0xbe, 0x47, 0xfa, 0x1e, 0xe9, 0x7b, + 0xa4, 0xef, 0x19, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, + 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x60, 0xda, 0x88, 0xfe, 0xf4, 0x55, 0xbd, 0x97, 0xcd, 0x53, + 0xd0, 0xd4, 0x57, 0x9e, 0x44, 0x79, 0x12, 0xe5, 0x49, 0x94, 0x27, 0x51, 0x9e, 0x44, 0x79, 0xd6, + 0x27, 0x4f, 0xb0, 0x3c, 0x51, 0xf2, 0xa6, 0xbe, 0x8e, 0xa8, 0xaf, 0x23, 0xfa, 0x8f, 0x4c, 0xff, + 0xbf, 0xbf, 0xeb, 0xf4, 0x1f, 0xd2, 0x7f, 0x48, 0xff, 0xa1, 0xa9, 0x94, 0x0e, 0x49, 0x3a, 0x24, + 0xe9, 0x90, 0xa4, 0x43, 0x92, 0x0e, 0x49, 0x3a, 0x24, 0xe9, 0x90, 0xa4, 0xc3, 0x14, 0x75, 0x98, + 0xa2, 0x0e, 0xbc, 0x8e, 0xe1, 0xcd, 0xf5, 0xa1, 0x3c, 0xf7, 0x0b, 0xb9, 
0x36, 0xc7, 0x7d, 0x24, + 0xe0, 0xe3, 0x5a, 0xb2, 0x36, 0xc7, 0xd1, 0x1c, 0x58, 0xf4, 0x84, 0xfe, 0x6b, 0x91, 0x9d, 0xf1, + 0xf7, 0x0c, 0x11, 0x55, 0xdc, 0x27, 0x79, 0x9f, 0xe4, 0x7d, 0x92, 0xf7, 0x49, 0xde, 0x27, 0x79, + 0x9f, 0xc8, 0x7d, 0x22, 0xf7, 0x89, 0xdc, 0x27, 0x72, 0xdf, 0x4c, 0x81, 0xbf, 0x7d, 0x15, 0xf0, + 0xb1, 0xbd, 0xd6, 0xe6, 0xd8, 0x5e, 0x81, 0x45, 0x77, 0xd9, 0xa3, 0x19, 0xb3, 0xb3, 0x36, 0xc7, + 0xec, 0x04, 0x66, 0x4f, 0xd2, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x47, 0x5c, 0x26, 0x6b, + 0x16, 0x00, 0x00, + } + buf, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + panic(err) + } + dec := gob.NewDecoder(buf) + if err := dec.Decode(&tab); err != nil { + panic(err) + } + + for i, row := range tab { + actionTab[i].canRecover = row.CanRecover + for _, a := range row.Actions { + switch a.Action { + case 0: + actionTab[i].actions[a.Index] = accept(true) + case 1: + actionTab[i].actions[a.Index] = reduce(a.Amount) + case 2: + actionTab[i].actions[a.Index] = shift(a.Amount) + } + } + } +} diff --git a/vendor/github.com/awalterschulze/gographviz/internal/parser/gototable.go b/vendor/github.com/awalterschulze/gographviz/internal/parser/gototable.go new file mode 100644 index 0000000..dc33aac --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/internal/parser/gototable.go @@ -0,0 +1,56 @@ +// Code generated by gocc; DO NOT EDIT. + +package parser + +import ( + "bytes" + "compress/gzip" + "encoding/gob" +) + +const numNTSymbols = 17 + +type ( + gotoTable [numStates]gotoRow + gotoRow [numNTSymbols]int +) + +var gotoTab = gotoTable{} + +func init() { + tab := [][]int{} + data := []byte{ + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe2, 0xfd, 0xdf, 0xcd, 0xc4, 0xc8, + 0xf4, 0xbf, 0x87, 0x81, 0xf1, 0x7f, 0x17, 0x03, 0x03, 0xcf, 0xff, 0x4e, 0x10, 0xaf, 0x8b, 0x81, + 0x91, 0x85, 0x81, 0xe1, 0x1f, 0xa7, 0xc6, 0xff, 0x1e, 0x86, 0xff, 0x0d, 0x82, 0x8c, 0x4c, 0x8c, + 0xa8, 0x40, 0x90, 0x11, 0x1d, 0x60, 0x88, 0xf0, 0x10, 0xa1, 0x46, 0x4c, 0x90, 0x91, 0x51, 0x41, + 0x49, 0x45, 0x8b, 0x91, 0x91, 0x51, 0x83, 0x51, 0xcd, 0x88, 0x51, 0x87, 0x51, 0x8e, 0x08, 0x5d, + 0x98, 0x22, 0x36, 0x18, 0x22, 0x0e, 0x82, 0x8c, 0x8c, 0x2e, 0x44, 0x9a, 0xec, 0x81, 0x22, 0xe2, + 0x43, 0x9a, 0x7b, 0x02, 0xa0, 0x22, 0x61, 0x94, 0xfb, 0x82, 0x54, 0x91, 0x28, 0x10, 0x11, 0x03, + 0x13, 0x49, 0xc2, 0x50, 0x93, 0x82, 0x21, 0x92, 0xc6, 0xc8, 0xc8, 0x98, 0x81, 0xa2, 0x0b, 0x01, + 0x72, 0xb0, 0xda, 0x55, 0x80, 0x11, 0x1a, 0x25, 0x44, 0x84, 0x4f, 0x15, 0x79, 0x71, 0x8a, 0x11, + 0x86, 0xff, 0x9b, 0x88, 0x35, 0xe8, 0x7f, 0xd7, 0xff, 0x1e, 0x18, 0xb3, 0x8d, 0x88, 0xf8, 0xf8, + 0x3f, 0x89, 0x08, 0x47, 0xfd, 0x9f, 0x46, 0x51, 0x1c, 0xfd, 0x9f, 0x05, 0x37, 0x68, 0x0e, 0xaa, + 0xaa, 0xff, 0x4b, 0x18, 0xff, 0x2f, 0x62, 0xfc, 0xbf, 0x80, 0xe6, 0xc9, 0xe4, 0xff, 0x0a, 0x54, + 0x27, 0xfc, 0x5f, 0x43, 0xa5, 0x98, 0x21, 0x46, 0xcd, 0xff, 0x6d, 0xc4, 0x58, 0x86, 0xa9, 0x6d, + 0x0f, 0x35, 0xdd, 0xf8, 0xff, 0x10, 0x5a, 0xc2, 0x20, 0x32, 0xe4, 0x18, 0x19, 0xff, 0x9f, 0x20, + 0x4f, 0x1f, 0xbd, 0x45, 0xfe, 0x5f, 0x60, 0xfc, 0x7f, 0x0e, 0x92, 0x98, 0xfe, 0x5f, 0xc2, 0x17, + 0x74, 0x01, 0xe4, 0xda, 0xf7, 0xff, 0x1a, 0xf9, 0x39, 0xf8, 0x0e, 0x15, 0x8a, 0x77, 0x0a, 0x0a, + 0x58, 0x0a, 0x8a, 0x65, 0x7c, 0xf1, 0xff, 0xff, 0x09, 0x69, 0x46, 0xbd, 0xc0, 0x63, 0xd4, 0x2b, + 0xf2, 0x03, 0xf7, 0xd3, 0xc0, 0x06, 0xee, 0x60, 0x13, 0xc1, 0x0c, 0xa0, 0x3f, 0xf4, 0x0a, 0x20, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8f, 0xfa, 0xd1, 0x1a, 0x46, 0x09, 0x00, 0x00, + } + buf, err := 
gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + panic(err) + } + dec := gob.NewDecoder(buf) + if err := dec.Decode(&tab); err != nil { + panic(err) + } + for i := 0; i < numStates; i++ { + for j := 0; j < numNTSymbols; j++ { + gotoTab[i][j] = tab[i][j] + } + } +} diff --git a/vendor/github.com/awalterschulze/gographviz/internal/parser/main.go b/vendor/github.com/awalterschulze/gographviz/internal/parser/main.go new file mode 100644 index 0000000..0253f93 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/internal/parser/main.go @@ -0,0 +1,72 @@ +//Copyright 2013 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +//A parser for the DOT grammar. +package parser + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/awalterschulze/gographviz/ast" + "github.com/awalterschulze/gographviz/internal/lexer" +) + +//Parses a DOT string and outputs the +//abstract syntax tree representing the graph. +func ParseString(dotString string) (*ast.Graph, error) { + return ParseBytes([]byte(dotString)) +} + +//Parses the bytes representing a DOT string +//and outputs the abstract syntax tree representing the graph. +func ParseBytes(dotBytes []byte) (*ast.Graph, error) { + lex := lexer.NewLexer(dotBytes) + parser := NewParser() + st, err := parser.Parse(lex) + if err != nil { + return nil, err + } + g, ok := st.(*ast.Graph) + if !ok { + panic(fmt.Sprintf("Parser did not return an *ast.Graph, but rather a %T", st)) + } + return g, nil +} + +//Parses a reader which contains a DOT string +//and outputs the abstract syntax tree representing the graph. +func Parse(r io.Reader) (*ast.Graph, error) { + bytes, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + return ParseBytes(bytes) +} + +//Parses a file which contains a DOT string +//and outputs the abstract syntax tree representing the graph. +func ParseFile(filename string) (*ast.Graph, error) { + f, err := os.Open(filename) + if err != nil { + return nil, err + } + g, err := Parse(f) + if err := f.Close(); err != nil { + return nil, err + } + return g, err +} diff --git a/vendor/github.com/awalterschulze/gographviz/internal/parser/parser.go b/vendor/github.com/awalterschulze/gographviz/internal/parser/parser.go new file mode 100644 index 0000000..8d49425 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/internal/parser/parser.go @@ -0,0 +1,216 @@ +// Code generated by gocc; DO NOT EDIT. 
+ +package parser + +import ( + "bytes" + "fmt" + + parseError "github.com/awalterschulze/gographviz/internal/errors" + "github.com/awalterschulze/gographviz/internal/token" +) + +const ( + numProductions = 60 + numStates = 128 + numSymbols = 36 +) + +// Stack + +type stack struct { + state []int + attrib []Attrib +} + +const iNITIAL_STACK_SIZE = 100 + +func newStack() *stack { + return &stack{ + state: make([]int, 0, iNITIAL_STACK_SIZE), + attrib: make([]Attrib, 0, iNITIAL_STACK_SIZE), + } +} + +func (s *stack) reset() { + s.state = s.state[:0] + s.attrib = s.attrib[:0] +} + +func (s *stack) push(state int, a Attrib) { + s.state = append(s.state, state) + s.attrib = append(s.attrib, a) +} + +func (s *stack) top() int { + return s.state[len(s.state)-1] +} + +func (s *stack) peek(pos int) int { + return s.state[pos] +} + +func (s *stack) topIndex() int { + return len(s.state) - 1 +} + +func (s *stack) popN(items int) []Attrib { + lo, hi := len(s.state)-items, len(s.state) + + attrib := s.attrib[lo:hi] + + s.state = s.state[:lo] + s.attrib = s.attrib[:lo] + + return attrib +} + +func (s *stack) String() string { + w := new(bytes.Buffer) + fmt.Fprintf(w, "stack:\n") + for i, st := range s.state { + fmt.Fprintf(w, "\t%d: %d , ", i, st) + if s.attrib[i] == nil { + fmt.Fprintf(w, "nil") + } else { + switch attr := s.attrib[i].(type) { + case *token.Token: + fmt.Fprintf(w, "%s", attr.Lit) + default: + fmt.Fprintf(w, "%v", attr) + } + } + fmt.Fprintf(w, "\n") + } + return w.String() +} + +// Parser + +type Parser struct { + stack *stack + nextToken *token.Token + pos int +} + +type Scanner interface { + Scan() (tok *token.Token) +} + +func NewParser() *Parser { + p := &Parser{stack: newStack()} + p.Reset() + return p +} + +func (p *Parser) Reset() { + p.stack.reset() + p.stack.push(0, nil) +} + +func (p *Parser) Error(err error, scanner Scanner) (recovered bool, errorAttrib *parseError.Error) { + errorAttrib = &parseError.Error{ + Err: err, + ErrorToken: p.nextToken, + ErrorSymbols: p.popNonRecoveryStates(), + ExpectedTokens: make([]string, 0, 8), + } + for t, action := range actionTab[p.stack.top()].actions { + if action != nil { + errorAttrib.ExpectedTokens = append(errorAttrib.ExpectedTokens, token.TokMap.Id(token.Type(t))) + } + } + + if action := actionTab[p.stack.top()].actions[token.TokMap.Type("error")]; action != nil { + p.stack.push(int(action.(shift)), errorAttrib) // action can only be shift + } else { + return + } + + if action := actionTab[p.stack.top()].actions[p.nextToken.Type]; action != nil { + recovered = true + } + for !recovered && p.nextToken.Type != token.EOF { + p.nextToken = scanner.Scan() + if action := actionTab[p.stack.top()].actions[p.nextToken.Type]; action != nil { + recovered = true + } + } + + return +} + +func (p *Parser) popNonRecoveryStates() (removedAttribs []parseError.ErrorSymbol) { + if rs, ok := p.firstRecoveryState(); ok { + errorSymbols := p.stack.popN(p.stack.topIndex() - rs) + removedAttribs = make([]parseError.ErrorSymbol, len(errorSymbols)) + for i, e := range errorSymbols { + removedAttribs[i] = e + } + } else { + removedAttribs = []parseError.ErrorSymbol{} + } + return +} + +// recoveryState points to the highest state on the stack, which can recover +func (p *Parser) firstRecoveryState() (recoveryState int, canRecover bool) { + recoveryState, canRecover = p.stack.topIndex(), actionTab[p.stack.top()].canRecover + for recoveryState > 0 && !canRecover { + recoveryState-- + canRecover = actionTab[p.stack.peek(recoveryState)].canRecover + } + return +} 
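// A minimal, hypothetical usage sketch of the exported entry points defined in main.go above (ParseString, ParseBytes, Parse, ParseFile). Only the ParseString signature shown there is relied on; the test name and the DOT input are illustrative assumptions. Because this package sits under internal/, the sketch is written as it might appear in a _test.go file inside the package itself (which would also import "testing").
func TestParseStringSketch(t *testing.T) {
	// Parse a small DOT graph; on success ParseString returns the *ast.Graph root.
	g, err := ParseString(`digraph G { a -> b }`)
	if err != nil {
		t.Fatalf("ParseString failed: %v", err)
	}
	if g == nil {
		t.Fatal("expected a non-nil *ast.Graph")
	}
}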
+ +func (p *Parser) newError(err error) error { + e := &parseError.Error{ + Err: err, + StackTop: p.stack.top(), + ErrorToken: p.nextToken, + } + actRow := actionTab[p.stack.top()] + for i, t := range actRow.actions { + if t != nil { + e.ExpectedTokens = append(e.ExpectedTokens, token.TokMap.Id(token.Type(i))) + } + } + return e +} + +func (p *Parser) Parse(scanner Scanner) (res interface{}, err error) { + p.Reset() + p.nextToken = scanner.Scan() + for acc := false; !acc; { + action := actionTab[p.stack.top()].actions[p.nextToken.Type] + if action == nil { + if recovered, errAttrib := p.Error(nil, scanner); !recovered { + p.nextToken = errAttrib.ErrorToken + return nil, p.newError(nil) + } + if action = actionTab[p.stack.top()].actions[p.nextToken.Type]; action == nil { + panic("Error recovery led to invalid action") + } + } + + switch act := action.(type) { + case accept: + res = p.stack.popN(1)[0] + acc = true + case shift: + p.stack.push(int(act), p.nextToken) + p.nextToken = scanner.Scan() + case reduce: + prod := productionsTable[int(act)] + attrib, err := prod.ReduceFunc(p.stack.popN(prod.NumSymbols)) + if err != nil { + return nil, p.newError(err) + } else { + p.stack.push(gotoTab[p.stack.top()][prod.NTType], attrib) + } + default: + panic("unknown action: " + action.String()) + } + } + return res, nil +} diff --git a/vendor/github.com/awalterschulze/gographviz/internal/parser/productionstable.go b/vendor/github.com/awalterschulze/gographviz/internal/parser/productionstable.go new file mode 100644 index 0000000..3262aef --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/internal/parser/productionstable.go @@ -0,0 +1,623 @@ +// Code generated by gocc; DO NOT EDIT. + +package parser + +import "github.com/awalterschulze/gographviz/ast" + +type ( + //TODO: change type and variable names to be consistent with other tables + ProdTab [numProductions]ProdTabEntry + ProdTabEntry struct { + String string + Id string + NTType int + Index int + NumSymbols int + ReduceFunc func([]Attrib) (Attrib, error) + } + Attrib interface { + } +) + +var productionsTable = ProdTab{ + ProdTabEntry{ + String: `S' : DotGraph << >>`, + Id: "S'", + NTType: 0, + Index: 0, + NumSymbols: 1, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return X[0], nil + }, + }, + ProdTabEntry{ + String: `DotGraph : graphx "{" "}" << ast.NewGraph(ast.GRAPH, ast.FALSE, nil, nil) >>`, + Id: "DotGraph", + NTType: 1, + Index: 1, + NumSymbols: 3, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.GRAPH, ast.FALSE, nil, nil) + }, + }, + ProdTabEntry{ + String: `DotGraph : strict graphx "{" "}" << ast.NewGraph(ast.GRAPH, ast.TRUE, nil, nil) >>`, + Id: "DotGraph", + NTType: 1, + Index: 2, + NumSymbols: 4, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.GRAPH, ast.TRUE, nil, nil) + }, + }, + ProdTabEntry{ + String: `DotGraph : graphx Id "{" "}" << ast.NewGraph(ast.GRAPH, ast.FALSE, X[1], nil) >>`, + Id: "DotGraph", + NTType: 1, + Index: 3, + NumSymbols: 4, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.GRAPH, ast.FALSE, X[1], nil) + }, + }, + ProdTabEntry{ + String: `DotGraph : strict graphx Id "{" "}" << ast.NewGraph(ast.GRAPH, ast.TRUE, X[2], nil) >>`, + Id: "DotGraph", + NTType: 1, + Index: 4, + NumSymbols: 5, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.GRAPH, ast.TRUE, X[2], nil) + }, + }, + ProdTabEntry{ + String: `DotGraph : graphx "{" StmtList "}" << ast.NewGraph(ast.GRAPH, ast.FALSE, nil, X[2]) >>`, + Id: 
"DotGraph", + NTType: 1, + Index: 5, + NumSymbols: 4, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.GRAPH, ast.FALSE, nil, X[2]) + }, + }, + ProdTabEntry{ + String: `DotGraph : graphx Id "{" StmtList "}" << ast.NewGraph(ast.GRAPH, ast.FALSE, X[1], X[3]) >>`, + Id: "DotGraph", + NTType: 1, + Index: 6, + NumSymbols: 5, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.GRAPH, ast.FALSE, X[1], X[3]) + }, + }, + ProdTabEntry{ + String: `DotGraph : strict graphx "{" StmtList "}" << ast.NewGraph(ast.GRAPH, ast.TRUE, nil, X[3]) >>`, + Id: "DotGraph", + NTType: 1, + Index: 7, + NumSymbols: 5, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.GRAPH, ast.TRUE, nil, X[3]) + }, + }, + ProdTabEntry{ + String: `DotGraph : strict graphx Id "{" StmtList "}" << ast.NewGraph(ast.GRAPH, ast.TRUE, X[2], X[4]) >>`, + Id: "DotGraph", + NTType: 1, + Index: 8, + NumSymbols: 6, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.GRAPH, ast.TRUE, X[2], X[4]) + }, + }, + ProdTabEntry{ + String: `DotGraph : digraph "{" "}" << ast.NewGraph(ast.DIGRAPH, ast.FALSE, nil, nil) >>`, + Id: "DotGraph", + NTType: 1, + Index: 9, + NumSymbols: 3, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.DIGRAPH, ast.FALSE, nil, nil) + }, + }, + ProdTabEntry{ + String: `DotGraph : strict digraph "{" "}" << ast.NewGraph(ast.DIGRAPH, ast.TRUE, nil, nil) >>`, + Id: "DotGraph", + NTType: 1, + Index: 10, + NumSymbols: 4, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.DIGRAPH, ast.TRUE, nil, nil) + }, + }, + ProdTabEntry{ + String: `DotGraph : digraph Id "{" "}" << ast.NewGraph(ast.DIGRAPH, ast.FALSE, X[1], nil) >>`, + Id: "DotGraph", + NTType: 1, + Index: 11, + NumSymbols: 4, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.DIGRAPH, ast.FALSE, X[1], nil) + }, + }, + ProdTabEntry{ + String: `DotGraph : strict digraph Id "{" "}" << ast.NewGraph(ast.DIGRAPH, ast.TRUE, X[2], nil) >>`, + Id: "DotGraph", + NTType: 1, + Index: 12, + NumSymbols: 5, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.DIGRAPH, ast.TRUE, X[2], nil) + }, + }, + ProdTabEntry{ + String: `DotGraph : digraph "{" StmtList "}" << ast.NewGraph(ast.DIGRAPH, ast.FALSE, nil, X[2]) >>`, + Id: "DotGraph", + NTType: 1, + Index: 13, + NumSymbols: 4, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.DIGRAPH, ast.FALSE, nil, X[2]) + }, + }, + ProdTabEntry{ + String: `DotGraph : digraph Id "{" StmtList "}" << ast.NewGraph(ast.DIGRAPH, ast.FALSE, X[1], X[3]) >>`, + Id: "DotGraph", + NTType: 1, + Index: 14, + NumSymbols: 5, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.DIGRAPH, ast.FALSE, X[1], X[3]) + }, + }, + ProdTabEntry{ + String: `DotGraph : strict digraph "{" StmtList "}" << ast.NewGraph(ast.DIGRAPH, ast.TRUE, nil, X[3]) >>`, + Id: "DotGraph", + NTType: 1, + Index: 15, + NumSymbols: 5, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.DIGRAPH, ast.TRUE, nil, X[3]) + }, + }, + ProdTabEntry{ + String: `DotGraph : strict digraph Id "{" StmtList "}" << ast.NewGraph(ast.DIGRAPH, ast.TRUE, X[2], X[4]) >>`, + Id: "DotGraph", + NTType: 1, + Index: 16, + NumSymbols: 6, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraph(ast.DIGRAPH, ast.TRUE, X[2], X[4]) + }, + }, + ProdTabEntry{ + String: `StmtList : Stmt1 << ast.NewStmtList(X[0]) >>`, + Id: "StmtList", + NTType: 2, + Index: 17, + NumSymbols: 
1, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewStmtList(X[0]) + }, + }, + ProdTabEntry{ + String: `StmtList : StmtList Stmt1 << ast.AppendStmtList(X[0], X[1]) >>`, + Id: "StmtList", + NTType: 2, + Index: 18, + NumSymbols: 2, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.AppendStmtList(X[0], X[1]) + }, + }, + ProdTabEntry{ + String: `Stmt1 : Stmt << X[0], nil >>`, + Id: "Stmt1", + NTType: 3, + Index: 19, + NumSymbols: 1, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return X[0], nil + }, + }, + ProdTabEntry{ + String: `Stmt1 : Stmt ";" << X[0], nil >>`, + Id: "Stmt1", + NTType: 3, + Index: 20, + NumSymbols: 2, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return X[0], nil + }, + }, + ProdTabEntry{ + String: `Stmt : Id "=" Id << ast.NewAttr(X[0], X[2]) >>`, + Id: "Stmt", + NTType: 4, + Index: 21, + NumSymbols: 3, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewAttr(X[0], X[2]) + }, + }, + ProdTabEntry{ + String: `Stmt : NodeStmt << X[0], nil >>`, + Id: "Stmt", + NTType: 4, + Index: 22, + NumSymbols: 1, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return X[0], nil + }, + }, + ProdTabEntry{ + String: `Stmt : EdgeStmt << X[0], nil >>`, + Id: "Stmt", + NTType: 4, + Index: 23, + NumSymbols: 1, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return X[0], nil + }, + }, + ProdTabEntry{ + String: `Stmt : AttrStmt << X[0], nil >>`, + Id: "Stmt", + NTType: 4, + Index: 24, + NumSymbols: 1, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return X[0], nil + }, + }, + ProdTabEntry{ + String: `Stmt : SubGraphStmt << X[0], nil >>`, + Id: "Stmt", + NTType: 4, + Index: 25, + NumSymbols: 1, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return X[0], nil + }, + }, + ProdTabEntry{ + String: `AttrStmt : graphx AttrList << ast.NewGraphAttrs(X[1]) >>`, + Id: "AttrStmt", + NTType: 5, + Index: 26, + NumSymbols: 2, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewGraphAttrs(X[1]) + }, + }, + ProdTabEntry{ + String: `AttrStmt : node AttrList << ast.NewNodeAttrs(X[1]) >>`, + Id: "AttrStmt", + NTType: 5, + Index: 27, + NumSymbols: 2, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewNodeAttrs(X[1]) + }, + }, + ProdTabEntry{ + String: `AttrStmt : edge AttrList << ast.NewEdgeAttrs(X[1]) >>`, + Id: "AttrStmt", + NTType: 5, + Index: 28, + NumSymbols: 2, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewEdgeAttrs(X[1]) + }, + }, + ProdTabEntry{ + String: `AttrList : "[" "]" << ast.NewAttrList(nil) >>`, + Id: "AttrList", + NTType: 6, + Index: 29, + NumSymbols: 2, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewAttrList(nil) + }, + }, + ProdTabEntry{ + String: `AttrList : "[" AList "]" << ast.NewAttrList(X[1]) >>`, + Id: "AttrList", + NTType: 6, + Index: 30, + NumSymbols: 3, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewAttrList(X[1]) + }, + }, + ProdTabEntry{ + String: `AttrList : AttrList "[" "]" << ast.AppendAttrList(X[0], nil) >>`, + Id: "AttrList", + NTType: 6, + Index: 31, + NumSymbols: 3, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.AppendAttrList(X[0], nil) + }, + }, + ProdTabEntry{ + String: `AttrList : AttrList "[" AList "]" << ast.AppendAttrList(X[0], X[2]) >>`, + Id: "AttrList", + NTType: 6, + Index: 32, + NumSymbols: 4, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.AppendAttrList(X[0], X[2]) + }, + }, + ProdTabEntry{ + String: `AList : Attr << ast.NewAList(X[0]) >>`, + Id: "AList", + NTType: 7, + Index: 33, + NumSymbols: 1, 
+ ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewAList(X[0]) + }, + }, + ProdTabEntry{ + String: `AList : AList Attr << ast.AppendAList(X[0], X[1]) >>`, + Id: "AList", + NTType: 7, + Index: 34, + NumSymbols: 2, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.AppendAList(X[0], X[1]) + }, + }, + ProdTabEntry{ + String: `AList : AList "," Attr << ast.AppendAList(X[0], X[2]) >>`, + Id: "AList", + NTType: 7, + Index: 35, + NumSymbols: 3, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.AppendAList(X[0], X[2]) + }, + }, + ProdTabEntry{ + String: `Attr : Id << ast.NewAttr(X[0], nil) >>`, + Id: "Attr", + NTType: 8, + Index: 36, + NumSymbols: 1, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewAttr(X[0], nil) + }, + }, + ProdTabEntry{ + String: `Attr : Id "=" Id << ast.NewAttr(X[0], X[2]) >>`, + Id: "Attr", + NTType: 8, + Index: 37, + NumSymbols: 3, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewAttr(X[0], X[2]) + }, + }, + ProdTabEntry{ + String: `EdgeStmt : NodeId EdgeRHS << ast.NewEdgeStmt(X[0], X[1], nil) >>`, + Id: "EdgeStmt", + NTType: 9, + Index: 38, + NumSymbols: 2, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewEdgeStmt(X[0], X[1], nil) + }, + }, + ProdTabEntry{ + String: `EdgeStmt : NodeId EdgeRHS AttrList << ast.NewEdgeStmt(X[0], X[1], X[2]) >>`, + Id: "EdgeStmt", + NTType: 9, + Index: 39, + NumSymbols: 3, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewEdgeStmt(X[0], X[1], X[2]) + }, + }, + ProdTabEntry{ + String: `EdgeStmt : SubGraphStmt EdgeRHS << ast.NewEdgeStmt(X[0], X[1], nil) >>`, + Id: "EdgeStmt", + NTType: 9, + Index: 40, + NumSymbols: 2, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewEdgeStmt(X[0], X[1], nil) + }, + }, + ProdTabEntry{ + String: `EdgeStmt : SubGraphStmt EdgeRHS AttrList << ast.NewEdgeStmt(X[0], X[1], X[2]) >>`, + Id: "EdgeStmt", + NTType: 9, + Index: 41, + NumSymbols: 3, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewEdgeStmt(X[0], X[1], X[2]) + }, + }, + ProdTabEntry{ + String: `EdgeRHS : EdgeOp NodeId << ast.NewEdgeRHS(X[0], X[1]) >>`, + Id: "EdgeRHS", + NTType: 10, + Index: 42, + NumSymbols: 2, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewEdgeRHS(X[0], X[1]) + }, + }, + ProdTabEntry{ + String: `EdgeRHS : EdgeOp SubGraphStmt << ast.NewEdgeRHS(X[0], X[1]) >>`, + Id: "EdgeRHS", + NTType: 10, + Index: 43, + NumSymbols: 2, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewEdgeRHS(X[0], X[1]) + }, + }, + ProdTabEntry{ + String: `EdgeRHS : EdgeRHS EdgeOp NodeId << ast.AppendEdgeRHS(X[0], X[1], X[2]) >>`, + Id: "EdgeRHS", + NTType: 10, + Index: 44, + NumSymbols: 3, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.AppendEdgeRHS(X[0], X[1], X[2]) + }, + }, + ProdTabEntry{ + String: `EdgeRHS : EdgeRHS EdgeOp SubGraphStmt << ast.AppendEdgeRHS(X[0], X[1], X[2]) >>`, + Id: "EdgeRHS", + NTType: 10, + Index: 45, + NumSymbols: 3, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.AppendEdgeRHS(X[0], X[1], X[2]) + }, + }, + ProdTabEntry{ + String: `NodeStmt : NodeId << ast.NewNodeStmt(X[0], nil) >>`, + Id: "NodeStmt", + NTType: 11, + Index: 46, + NumSymbols: 1, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewNodeStmt(X[0], nil) + }, + }, + ProdTabEntry{ + String: `NodeStmt : NodeId AttrList << ast.NewNodeStmt(X[0], X[1]) >>`, + Id: "NodeStmt", + NTType: 11, + Index: 47, + NumSymbols: 2, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return 
ast.NewNodeStmt(X[0], X[1]) + }, + }, + ProdTabEntry{ + String: `NodeId : Id << ast.NewNodeID(X[0], nil) >>`, + Id: "NodeId", + NTType: 12, + Index: 48, + NumSymbols: 1, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewNodeID(X[0], nil) + }, + }, + ProdTabEntry{ + String: `NodeId : Id Port << ast.NewNodeID(X[0], X[1]) >>`, + Id: "NodeId", + NTType: 12, + Index: 49, + NumSymbols: 2, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewNodeID(X[0], X[1]) + }, + }, + ProdTabEntry{ + String: `Port : ":" Id << ast.NewPort(X[1], nil), nil >>`, + Id: "Port", + NTType: 13, + Index: 50, + NumSymbols: 2, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewPort(X[1], nil), nil + }, + }, + ProdTabEntry{ + String: `Port : ":" Id ":" Id << ast.NewPort(X[1], X[3]), nil >>`, + Id: "Port", + NTType: 13, + Index: 51, + NumSymbols: 4, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewPort(X[1], X[3]), nil + }, + }, + ProdTabEntry{ + String: `SubGraphStmt : "{" StmtList "}" << ast.NewSubGraph(nil, X[1]) >>`, + Id: "SubGraphStmt", + NTType: 14, + Index: 52, + NumSymbols: 3, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewSubGraph(nil, X[1]) + }, + }, + ProdTabEntry{ + String: `SubGraphStmt : subgraph "{" StmtList "}" << ast.NewSubGraph(nil, X[2]) >>`, + Id: "SubGraphStmt", + NTType: 14, + Index: 53, + NumSymbols: 4, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewSubGraph(nil, X[2]) + }, + }, + ProdTabEntry{ + String: `SubGraphStmt : subgraph Id "{" StmtList "}" << ast.NewSubGraph(X[1], X[3]) >>`, + Id: "SubGraphStmt", + NTType: 14, + Index: 54, + NumSymbols: 5, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewSubGraph(X[1], X[3]) + }, + }, + ProdTabEntry{ + String: `SubGraphStmt : subgraph "{" "}" << ast.NewSubGraph(nil, nil) >>`, + Id: "SubGraphStmt", + NTType: 14, + Index: 55, + NumSymbols: 3, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewSubGraph(nil, nil) + }, + }, + ProdTabEntry{ + String: `SubGraphStmt : subgraph Id "{" "}" << ast.NewSubGraph(X[1], nil) >>`, + Id: "SubGraphStmt", + NTType: 14, + Index: 56, + NumSymbols: 4, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewSubGraph(X[1], nil) + }, + }, + ProdTabEntry{ + String: `EdgeOp : "->" << ast.DIRECTED, nil >>`, + Id: "EdgeOp", + NTType: 15, + Index: 57, + NumSymbols: 1, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.DIRECTED, nil + }, + }, + ProdTabEntry{ + String: `EdgeOp : "--" << ast.UNDIRECTED, nil >>`, + Id: "EdgeOp", + NTType: 15, + Index: 58, + NumSymbols: 1, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.UNDIRECTED, nil + }, + }, + ProdTabEntry{ + String: `Id : id << ast.NewID(X[0]) >>`, + Id: "Id", + NTType: 16, + Index: 59, + NumSymbols: 1, + ReduceFunc: func(X []Attrib) (Attrib, error) { + return ast.NewID(X[0]) + }, + }, +} diff --git a/vendor/github.com/awalterschulze/gographviz/internal/token/token.go b/vendor/github.com/awalterschulze/gographviz/internal/token/token.go new file mode 100644 index 0000000..873fe8e --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/internal/token/token.go @@ -0,0 +1,104 @@ +// Code generated by gocc; DO NOT EDIT. 
+ +package token + +import ( + "fmt" +) + +type Token struct { + Type + Lit []byte + Pos +} + +type Type int + +const ( + INVALID Type = iota + EOF +) + +type Pos struct { + Offset int + Line int + Column int +} + +func (p Pos) String() string { + return fmt.Sprintf("Pos(offset=%d, line=%d, column=%d)", p.Offset, p.Line, p.Column) +} + +type TokenMap struct { + typeMap []string + idMap map[string]Type +} + +func (m TokenMap) Id(tok Type) string { + if int(tok) < len(m.typeMap) { + return m.typeMap[tok] + } + return "unknown" +} + +func (m TokenMap) Type(tok string) Type { + if typ, exist := m.idMap[tok]; exist { + return typ + } + return INVALID +} + +func (m TokenMap) TokenString(tok *Token) string { + //TODO: refactor to print pos & token string properly + return fmt.Sprintf("%s(%d,%s)", m.Id(tok.Type), tok.Type, tok.Lit) +} + +func (m TokenMap) StringType(typ Type) string { + return fmt.Sprintf("%s(%d)", m.Id(typ), typ) +} + +var TokMap = TokenMap{ + typeMap: []string{ + "INVALID", + "$", + "graphx", + "{", + "}", + "strict", + "digraph", + ";", + "=", + "node", + "edge", + "[", + "]", + ",", + ":", + "subgraph", + "->", + "--", + "id", + }, + + idMap: map[string]Type{ + "INVALID": 0, + "$": 1, + "graphx": 2, + "{": 3, + "}": 4, + "strict": 5, + "digraph": 6, + ";": 7, + "=": 8, + "node": 9, + "edge": 10, + "[": 11, + "]": 12, + ",": 13, + ":": 14, + "subgraph": 15, + "->": 16, + "--": 17, + "id": 18, + }, +} diff --git a/vendor/github.com/awalterschulze/gographviz/nodes.go b/vendor/github.com/awalterschulze/gographviz/nodes.go new file mode 100644 index 0000000..960c2fe --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/nodes.go @@ -0,0 +1,78 @@ +//Copyright 2013 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +package gographviz + +import ( + "fmt" + "sort" +) + +// Node represents a Node. +type Node struct { + Name string + Attrs Attrs +} + +// Nodes represents a set of Nodes. +type Nodes struct { + Lookup map[string]*Node + Nodes []*Node +} + +// NewNodes creates a new set of Nodes. +func NewNodes() *Nodes { + return &Nodes{make(map[string]*Node), make([]*Node, 0)} +} + +// Remove removes a node +func (nodes *Nodes) Remove(name string) error { + for i := 0; i < len(nodes.Nodes); i++ { + if nodes.Nodes[i].Name != name { + continue + } + + nodes.Nodes = append(nodes.Nodes[:i], nodes.Nodes[i+1:]...) + delete(nodes.Lookup, name) + + return nil + } + + return fmt.Errorf("node %s not found", name) +} + +// Add adds a Node to the set of Nodes, extending the attributes of an already existing node. +func (nodes *Nodes) Add(node *Node) { + n, ok := nodes.Lookup[node.Name] + if ok { + n.Attrs.Extend(node.Attrs) + return + } + nodes.Lookup[node.Name] = node + nodes.Nodes = append(nodes.Nodes, node) +} + +// Sorted returns a sorted list of nodes. 
+func (nodes Nodes) Sorted() []*Node { + keys := make([]string, 0, len(nodes.Lookup)) + for key := range nodes.Lookup { + keys = append(keys, key) + } + sort.Strings(keys) + nodeList := make([]*Node, len(keys)) + for i := range keys { + nodeList[i] = nodes.Lookup[keys[i]] + } + return nodeList +} diff --git a/vendor/github.com/awalterschulze/gographviz/relations.go b/vendor/github.com/awalterschulze/gographviz/relations.go new file mode 100644 index 0000000..64a65b5 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/relations.go @@ -0,0 +1,64 @@ +//Copyright 2013 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +package gographviz + +import ( + "sort" +) + +// Relations represents the relations between graphs and nodes. +// Each node belongs to the main graph or a subgraph. +type Relations struct { + ParentToChildren map[string]map[string]bool + ChildToParents map[string]map[string]bool +} + +// NewRelations creates an empty set of relations. +func NewRelations() *Relations { + return &Relations{make(map[string]map[string]bool), make(map[string]map[string]bool)} +} + +// Add adds a node to a parent graph. +func (relations *Relations) Add(parent string, child string) { + if _, ok := relations.ParentToChildren[parent]; !ok { + relations.ParentToChildren[parent] = make(map[string]bool) + } + relations.ParentToChildren[parent][child] = true + if _, ok := relations.ChildToParents[child]; !ok { + relations.ChildToParents[child] = make(map[string]bool) + } + relations.ChildToParents[child][parent] = true +} + +// Remove removes a relation +func (relations *Relations) Remove(parent string, child string) { + if _, ok := relations.ParentToChildren[parent]; ok { + delete(relations.ParentToChildren[parent], child) + } + + if _, ok := relations.ChildToParents[child]; ok { + delete(relations.ChildToParents[child], parent) + } +} + +// SortedChildren returns a list of sorted children of the given parent graph. +func (relations *Relations) SortedChildren(parent string) []string { + keys := make([]string, 0) + for key := range relations.ParentToChildren[parent] { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} diff --git a/vendor/github.com/awalterschulze/gographviz/subgraphs.go b/vendor/github.com/awalterschulze/gographviz/subgraphs.go new file mode 100644 index 0000000..092de01 --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/subgraphs.go @@ -0,0 +1,69 @@ +//Copyright 2013 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License.
+ +package gographviz + +import ( + "sort" +) + +// SubGraph represents a Subgraph. +type SubGraph struct { + Attrs Attrs + Name string +} + +// NewSubGraph creates a new Subgraph. +func NewSubGraph(name string) *SubGraph { + return &SubGraph{ + Attrs: make(Attrs), + Name: name, + } +} + +// SubGraphs represents a set of SubGraphs. +type SubGraphs struct { + SubGraphs map[string]*SubGraph +} + +// NewSubGraphs creates a new blank set of SubGraphs. +func NewSubGraphs() *SubGraphs { + return &SubGraphs{make(map[string]*SubGraph)} +} + +// Add adds and creates a new Subgraph to the set of SubGraphs. +func (subgraphs *SubGraphs) Add(name string) { + if _, ok := subgraphs.SubGraphs[name]; !ok { + subgraphs.SubGraphs[name] = NewSubGraph(name) + } +} + +// Remove removes a subgraph +func (subgraphs *SubGraphs) Remove(name string) { + delete(subgraphs.SubGraphs, name) +} + +// Sorted returns a sorted list of SubGraphs. +func (subgraphs *SubGraphs) Sorted() []*SubGraph { + keys := make([]string, 0) + for key := range subgraphs.SubGraphs { + keys = append(keys, key) + } + sort.Strings(keys) + s := make([]*SubGraph, len(keys)) + for i, key := range keys { + s[i] = subgraphs.SubGraphs[key] + } + return s +} diff --git a/vendor/github.com/awalterschulze/gographviz/write.go b/vendor/github.com/awalterschulze/gographviz/write.go new file mode 100644 index 0000000..2c18a5b --- /dev/null +++ b/vendor/github.com/awalterschulze/gographviz/write.go @@ -0,0 +1,172 @@ +//Copyright 2013 GoGraphviz Authors +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. 
+ +package gographviz + +import ( + "fmt" + + "github.com/awalterschulze/gographviz/ast" +) + +type writer struct { + *Graph + writtenLocations map[string]bool +} + +func newWriter(g *Graph) *writer { + return &writer{g, make(map[string]bool)} +} + +func appendAttrs(list ast.StmtList, attrs Attrs) ast.StmtList { + for _, name := range attrs.sortedNames() { + stmt := &ast.Attr{ + Field: ast.ID(name), + Value: ast.ID(attrs[name]), + } + list = append(list, stmt) + } + return list +} + +func (w *writer) newSubGraph(name string) (*ast.SubGraph, error) { + sub := w.SubGraphs.SubGraphs[name] + w.writtenLocations[sub.Name] = true + s := &ast.SubGraph{} + s.ID = ast.ID(sub.Name) + s.StmtList = appendAttrs(s.StmtList, sub.Attrs) + children := w.Relations.SortedChildren(name) + for _, child := range children { + if w.IsNode(child) { + s.StmtList = append(s.StmtList, w.newNodeStmt(child)) + } else if w.IsSubGraph(child) { + subgraph, err := w.newSubGraph(child) + if err != nil { + return nil, err + } + s.StmtList = append(s.StmtList, subgraph) + } else { + return nil, fmt.Errorf("%v is not a node or a subgraph", child) + } + } + return s, nil +} + +func (w *writer) newNodeID(name string, port string) *ast.NodeID { + node := w.Nodes.Lookup[name] + return ast.MakeNodeID(node.Name, port) +} + +func (w *writer) newNodeStmt(name string) *ast.NodeStmt { + node := w.Nodes.Lookup[name] + id := ast.MakeNodeID(node.Name, "") + w.writtenLocations[node.Name] = true + return &ast.NodeStmt{ + NodeID: id, + Attrs: ast.PutMap(node.Attrs.toMap()), + } +} + +func (w *writer) newLocation(name string, port string) (ast.Location, error) { + if w.IsNode(name) { + return w.newNodeID(name, port), nil + } else if w.isClusterSubGraph(name) { + if len(port) != 0 { + return nil, fmt.Errorf("subgraph cannot have a port: %v", port) + } + return ast.MakeNodeID(name, port), nil + } else if w.IsSubGraph(name) { + if len(port) != 0 { + return nil, fmt.Errorf("subgraph cannot have a port: %v", port) + } + return w.newSubGraph(name) + } + return nil, fmt.Errorf("%v is not a node or a subgraph", name) +} + +func (w *writer) newEdgeStmt(edge *Edge) (*ast.EdgeStmt, error) { + src, err := w.newLocation(edge.Src, edge.SrcPort) + if err != nil { + return nil, err + } + dst, err := w.newLocation(edge.Dst, edge.DstPort) + if err != nil { + return nil, err + } + stmt := &ast.EdgeStmt{ + Source: src, + EdgeRHS: ast.EdgeRHS{ + &ast.EdgeRH{ + Op: ast.EdgeOp(edge.Dir), + Destination: dst, + }, + }, + Attrs: ast.PutMap(edge.Attrs.toMap()), + } + return stmt, nil +} + +func (w *writer) Write() (*ast.Graph, error) { + t := &ast.Graph{} + t.Strict = w.Strict + t.Type = ast.GraphType(w.Directed) + t.ID = ast.ID(w.Name) + + t.StmtList = appendAttrs(t.StmtList, w.Attrs) + + for _, edge := range w.Edges.Edges { + e, err := w.newEdgeStmt(edge) + if err != nil { + return nil, err + } + t.StmtList = append(t.StmtList, e) + } + + subGraphs := w.SubGraphs.Sorted() + for _, s := range subGraphs { + if _, ok := w.writtenLocations[s.Name]; !ok { + if _, ok := w.Relations.ParentToChildren[w.Name][s.Name]; ok { + s, err := w.newSubGraph(s.Name) + if err != nil { + return nil, err + } + t.StmtList = append(t.StmtList, s) + } + } + } + + nodes := w.Nodes.Sorted() + for _, n := range nodes { + if _, ok := w.writtenLocations[n.Name]; !ok { + t.StmtList = append(t.StmtList, w.newNodeStmt(n.Name)) + } + } + + return t, nil +} + +// WriteAst creates an Abstract Syntrax Tree from the Graph. 
+func (g *Graph) WriteAst() (*ast.Graph, error) { + w := newWriter(g) + return w.Write() +} + +// String returns a DOT string representing the Graph. +func (g *Graph) String() string { + w, err := g.WriteAst() + if err != nil { + return fmt.Sprintf("error: %v", err) + } + return w.String() +} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 0000000..339177b --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 0000000..1602287 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 
+1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 
+11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 0000000..d7d14f8 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. 
+type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. 
+func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? 
+ } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/coreos/go-iptables/LICENSE b/vendor/github.com/coreos/go-iptables/LICENSE new file mode 100644 index 0000000..37ec93a --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-iptables/NOTICE b/vendor/github.com/coreos/go-iptables/NOTICE new file mode 100644 index 0000000..23a0ada --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-iptables/iptables/iptables.go b/vendor/github.com/coreos/go-iptables/iptables/iptables.go new file mode 100644 index 0000000..8db2597 --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/iptables/iptables.go @@ -0,0 +1,530 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "bytes" + "fmt" + "io" + "net" + "os/exec" + "regexp" + "strconv" + "strings" + "syscall" +) + +// Adds the output of stderr to exec.ExitError +type Error struct { + exec.ExitError + cmd exec.Cmd + msg string + exitStatus *int //for overriding +} + +func (e *Error) ExitStatus() int { + if e.exitStatus != nil { + return *e.exitStatus + } + return e.Sys().(syscall.WaitStatus).ExitStatus() +} + +func (e *Error) Error() string { + return fmt.Sprintf("running %v: exit status %v: %v", e.cmd.Args, e.ExitStatus(), e.msg) +} + +// IsNotExist returns true if the error is due to the chain or rule not existing +func (e *Error) IsNotExist() bool { + return e.ExitStatus() == 1 && + (e.msg == "iptables: Bad rule (does a matching rule exist in that chain?).\n" || + e.msg == "iptables: No chain/target/match by that name.\n") +} + +// Protocol to differentiate between IPv4 and IPv6 +type Protocol byte + +const ( + ProtocolIPv4 Protocol = iota + ProtocolIPv6 +) + +type IPTables struct { + path string + proto Protocol + hasCheck bool + hasWait bool + hasRandomFully bool + v1 int + v2 int + v3 int + mode string // the underlying iptables operating mode, e.g. nf_tables +} + +// New creates a new IPTables. +// For backwards compatibility, this always uses IPv4, i.e. "iptables". +func New() (*IPTables, error) { + return NewWithProtocol(ProtocolIPv4) +} + +// New creates a new IPTables for the given proto. +// The proto will determine which command is used, either "iptables" or "ip6tables". +func NewWithProtocol(proto Protocol) (*IPTables, error) { + path, err := exec.LookPath(getIptablesCommand(proto)) + if err != nil { + return nil, err + } + vstring, err := getIptablesVersionString(path) + v1, v2, v3, mode, err := extractIptablesVersion(vstring) + + checkPresent, waitPresent, randomFullyPresent := getIptablesCommandSupport(v1, v2, v3) + + ipt := IPTables{ + path: path, + proto: proto, + hasCheck: checkPresent, + hasWait: waitPresent, + hasRandomFully: randomFullyPresent, + v1: v1, + v2: v2, + v3: v3, + mode: mode, + } + return &ipt, nil +} + +// Proto returns the protocol used by this IPTables. +func (ipt *IPTables) Proto() Protocol { + return ipt.proto +} + +// Exists checks if given rulespec in specified table/chain exists +func (ipt *IPTables) Exists(table, chain string, rulespec ...string) (bool, error) { + if !ipt.hasCheck { + return ipt.existsForOldIptables(table, chain, rulespec) + + } + cmd := append([]string{"-t", table, "-C", chain}, rulespec...) + err := ipt.run(cmd...) + eerr, eok := err.(*Error) + switch { + case err == nil: + return true, nil + case eok && eerr.ExitStatus() == 1: + return false, nil + default: + return false, err + } +} + +// Insert inserts rulespec to specified table/chain (in specified pos) +func (ipt *IPTables) Insert(table, chain string, pos int, rulespec ...string) error { + cmd := append([]string{"-t", table, "-I", chain, strconv.Itoa(pos)}, rulespec...) + return ipt.run(cmd...) 
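The Exists/Insert pair above is the core of the go-iptables rule API. A minimal usage sketch, assuming the iptables binary is on PATH; the table, chain, and source CIDR are illustrative only:

    package main

    import (
        "log"

        "github.com/coreos/go-iptables/iptables"
    )

    func main() {
        // New defaults to IPv4; use NewWithProtocol(iptables.ProtocolIPv6) for ip6tables.
        ipt, err := iptables.New()
        if err != nil {
            log.Fatalf("creating iptables handle: %v", err)
        }
        rule := []string{"-s", "10.4.0.0/16", "-j", "ACCEPT"} // illustrative rulespec
        ok, err := ipt.Exists("filter", "INPUT", rule...)
        if err != nil {
            log.Fatalf("checking rule: %v", err)
        }
        if !ok {
            // Insert at position 1 so the rule is evaluated before existing ones.
            if err := ipt.Insert("filter", "INPUT", 1, rule...); err != nil {
                log.Fatalf("inserting rule: %v", err)
            }
        }
    }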
+} + +// Append appends rulespec to specified table/chain +func (ipt *IPTables) Append(table, chain string, rulespec ...string) error { + cmd := append([]string{"-t", table, "-A", chain}, rulespec...) + return ipt.run(cmd...) +} + +// AppendUnique acts like Append except that it won't add a duplicate +func (ipt *IPTables) AppendUnique(table, chain string, rulespec ...string) error { + exists, err := ipt.Exists(table, chain, rulespec...) + if err != nil { + return err + } + + if !exists { + return ipt.Append(table, chain, rulespec...) + } + + return nil +} + +// Delete removes rulespec in specified table/chain +func (ipt *IPTables) Delete(table, chain string, rulespec ...string) error { + cmd := append([]string{"-t", table, "-D", chain}, rulespec...) + return ipt.run(cmd...) +} + +// List rules in specified table/chain +func (ipt *IPTables) List(table, chain string) ([]string, error) { + args := []string{"-t", table, "-S", chain} + return ipt.executeList(args) +} + +// List rules (with counters) in specified table/chain +func (ipt *IPTables) ListWithCounters(table, chain string) ([]string, error) { + args := []string{"-t", table, "-v", "-S", chain} + return ipt.executeList(args) +} + +// ListChains returns a slice containing the name of each chain in the specified table. +func (ipt *IPTables) ListChains(table string) ([]string, error) { + args := []string{"-t", table, "-S"} + + result, err := ipt.executeList(args) + if err != nil { + return nil, err + } + + // Iterate over rules to find all default (-P) and user-specified (-N) chains. + // Chains definition always come before rules. + // Format is the following: + // -P OUTPUT ACCEPT + // -N Custom + var chains []string + for _, val := range result { + if strings.HasPrefix(val, "-P") || strings.HasPrefix(val, "-N") { + chains = append(chains, strings.Fields(val)[1]) + } else { + break + } + } + return chains, nil +} + +// Stats lists rules including the byte and packet counts +func (ipt *IPTables) Stats(table, chain string) ([][]string, error) { + args := []string{"-t", table, "-L", chain, "-n", "-v", "-x"} + lines, err := ipt.executeList(args) + if err != nil { + return nil, err + } + + appendSubnet := func(addr string) string { + if strings.IndexByte(addr, byte('/')) < 0 { + if strings.IndexByte(addr, '.') < 0 { + return addr + "/128" + } + return addr + "/32" + } + return addr + } + + ipv6 := ipt.proto == ProtocolIPv6 + + rows := [][]string{} + for i, line := range lines { + // Skip over chain name and field header + if i < 2 { + continue + } + + // Fields: + // 0=pkts 1=bytes 2=target 3=prot 4=opt 5=in 6=out 7=source 8=destination 9=options + line = strings.TrimSpace(line) + fields := strings.Fields(line) + + // The ip6tables verbose output cannot be naively split due to the default "opt" + // field containing 2 single spaces. + if ipv6 { + // Check if field 6 is "opt" or "source" address + dest := fields[6] + ip, _, _ := net.ParseCIDR(dest) + if ip == nil { + ip = net.ParseIP(dest) + } + + // If we detected a CIDR or IP, the "opt" field is empty.. insert it. + if ip != nil { + f := []string{} + f = append(f, fields[:4]...) + f = append(f, " ") // Empty "opt" field for ip6tables + f = append(f, fields[4:]...) + fields = f + } + } + + // Adjust "source" and "destination" to include netmask, to match regular + // List output + fields[7] = appendSubnet(fields[7]) + fields[8] = appendSubnet(fields[8]) + + // Combine "options" fields 9... into a single space-delimited field. 
+ options := fields[9:] + fields = fields[:9] + fields = append(fields, strings.Join(options, " ")) + rows = append(rows, fields) + } + return rows, nil +} + +func (ipt *IPTables) executeList(args []string) ([]string, error) { + var stdout bytes.Buffer + if err := ipt.runWithOutput(args, &stdout); err != nil { + return nil, err + } + + rules := strings.Split(stdout.String(), "\n") + + // strip trailing newline + if len(rules) > 0 && rules[len(rules)-1] == "" { + rules = rules[:len(rules)-1] + } + + // nftables mode doesn't return an error code when listing a non-existent + // chain. Patch that up. + if len(rules) == 0 && ipt.mode == "nf_tables" { + v := 1 + return nil, &Error{ + cmd: exec.Cmd{Args: args}, + msg: "iptables: No chain/target/match by that name.", + exitStatus: &v, + } + } + + for i, rule := range rules { + rules[i] = filterRuleOutput(rule) + } + + return rules, nil +} + +// NewChain creates a new chain in the specified table. +// If the chain already exists, it will result in an error. +func (ipt *IPTables) NewChain(table, chain string) error { + return ipt.run("-t", table, "-N", chain) +} + +// ClearChain flushed (deletes all rules) in the specified table/chain. +// If the chain does not exist, a new one will be created +func (ipt *IPTables) ClearChain(table, chain string) error { + err := ipt.NewChain(table, chain) + + // the exit code for "this table already exists" is different for + // different iptables modes + existsErr := 1 + if ipt.mode == "nf_tables" { + existsErr = 4 + } + + eerr, eok := err.(*Error) + switch { + case err == nil: + return nil + case eok && eerr.ExitStatus() == existsErr: + // chain already exists. Flush (clear) it. + return ipt.run("-t", table, "-F", chain) + default: + return err + } +} + +// RenameChain renames the old chain to the new one. +func (ipt *IPTables) RenameChain(table, oldChain, newChain string) error { + return ipt.run("-t", table, "-E", oldChain, newChain) +} + +// DeleteChain deletes the chain in the specified table. +// The chain must be empty +func (ipt *IPTables) DeleteChain(table, chain string) error { + return ipt.run("-t", table, "-X", chain) +} + +// ChangePolicy changes policy on chain to target +func (ipt *IPTables) ChangePolicy(table, chain, target string) error { + return ipt.run("-t", table, "-P", chain, target) +} + +// Check if the underlying iptables command supports the --random-fully flag +func (ipt *IPTables) HasRandomFully() bool { + return ipt.hasRandomFully +} + +// Return version components of the underlying iptables command +func (ipt *IPTables) GetIptablesVersion() (int, int, int) { + return ipt.v1, ipt.v2, ipt.v3 +} + +// run runs an iptables command with the given arguments, ignoring +// any stdout output +func (ipt *IPTables) run(args ...string) error { + return ipt.runWithOutput(args, nil) +} + +// runWithOutput runs an iptables command with the given arguments, +// writing any stdout output to the given writer +func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error { + args = append([]string{ipt.path}, args...) 
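ClearChain's create-or-flush behaviour, combined with AppendUnique above, makes it straightforward to manage a dedicated chain idempotently. A short sketch; the chain name is hypothetical and ipt is an *iptables.IPTables handle constructed as shown earlier:

    // ensureExampleChain creates (or flushes) a dedicated chain and links it
    // from FORWARD without duplicating the jump rule on repeated runs.
    func ensureExampleChain(ipt *iptables.IPTables) error {
        // ClearChain creates the chain if it is missing, or flushes it if it exists.
        if err := ipt.ClearChain("filter", "EXAMPLE-CHAIN"); err != nil {
            return err
        }
        // AppendUnique only appends when an identical rule is not already present.
        return ipt.AppendUnique("filter", "FORWARD", "-j", "EXAMPLE-CHAIN")
    }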
+ if ipt.hasWait { + args = append(args, "--wait") + } else { + fmu, err := newXtablesFileLock() + if err != nil { + return err + } + ul, err := fmu.tryLock() + if err != nil { + return err + } + defer ul.Unlock() + } + + var stderr bytes.Buffer + cmd := exec.Cmd{ + Path: ipt.path, + Args: args, + Stdout: stdout, + Stderr: &stderr, + } + + if err := cmd.Run(); err != nil { + switch e := err.(type) { + case *exec.ExitError: + return &Error{*e, cmd, stderr.String(), nil} + default: + return err + } + } + + return nil +} + +// getIptablesCommand returns the correct command for the given protocol, either "iptables" or "ip6tables". +func getIptablesCommand(proto Protocol) string { + if proto == ProtocolIPv6 { + return "ip6tables" + } else { + return "iptables" + } +} + +// Checks if iptables has the "-C" and "--wait" flag +func getIptablesCommandSupport(v1 int, v2 int, v3 int) (bool, bool, bool) { + return iptablesHasCheckCommand(v1, v2, v3), iptablesHasWaitCommand(v1, v2, v3), iptablesHasRandomFully(v1, v2, v3) +} + +// getIptablesVersion returns the first three components of the iptables version +// and the operating mode (e.g. nf_tables or legacy) +// e.g. "iptables v1.3.66" would return (1, 3, 66, legacy, nil) +func extractIptablesVersion(str string) (int, int, int, string, error) { + versionMatcher := regexp.MustCompile(`v([0-9]+)\.([0-9]+)\.([0-9]+)(?:\s+\((\w+))?`) + result := versionMatcher.FindStringSubmatch(str) + if result == nil { + return 0, 0, 0, "", fmt.Errorf("no iptables version found in string: %s", str) + } + + v1, err := strconv.Atoi(result[1]) + if err != nil { + return 0, 0, 0, "", err + } + + v2, err := strconv.Atoi(result[2]) + if err != nil { + return 0, 0, 0, "", err + } + + v3, err := strconv.Atoi(result[3]) + if err != nil { + return 0, 0, 0, "", err + } + + mode := "legacy" + if result[4] != "" { + mode = result[4] + } + return v1, v2, v3, mode, nil +} + +// Runs "iptables --version" to get the version string +func getIptablesVersionString(path string) (string, error) { + cmd := exec.Command(path, "--version") + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return "", err + } + return out.String(), nil +} + +// Checks if an iptables version is after 1.4.11, when --check was added +func iptablesHasCheckCommand(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 4 { + return true + } + if v1 == 1 && v2 == 4 && v3 >= 11 { + return true + } + return false +} + +// Checks if an iptables version is after 1.4.20, when --wait was added +func iptablesHasWaitCommand(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 4 { + return true + } + if v1 == 1 && v2 == 4 && v3 >= 20 { + return true + } + return false +} + +// Checks if an iptables version is after 1.6.2, when --random-fully was added +func iptablesHasRandomFully(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 6 { + return true + } + if v1 == 1 && v2 == 6 && v3 >= 2 { + return true + } + return false +} + +// Checks if a rule specification exists for a table +func (ipt *IPTables) existsForOldIptables(table, chain string, rulespec []string) (bool, error) { + rs := strings.Join(append([]string{"-A", chain}, rulespec...), " ") + args := []string{"-t", table, "-S"} + var stdout bytes.Buffer + err := ipt.runWithOutput(args, &stdout) + if err != nil { + return false, err + } + return strings.Contains(stdout.String(), rs), nil +} + +// counterRegex is the regex used to detect nftables counter 
format +var counterRegex = regexp.MustCompile(`^\[([0-9]+):([0-9]+)\] `) + +// filterRuleOutput works around some inconsistencies in output. +// For example, when iptables is in legacy vs. nftables mode, it produces +// different results. +func filterRuleOutput(rule string) string { + out := rule + + // work around an output difference in nftables mode where counters + // are output in iptables-save format, rather than iptables -S format + // The string begins with "[0:0]" + // + // Fixes #49 + if groups := counterRegex.FindStringSubmatch(out); groups != nil { + // drop the brackets + out = out[len(groups[0]):] + out = fmt.Sprintf("%s -c %s %s", out, groups[1], groups[2]) + } + + return out +} diff --git a/vendor/github.com/coreos/go-iptables/iptables/lock.go b/vendor/github.com/coreos/go-iptables/iptables/lock.go new file mode 100644 index 0000000..a88e92b --- /dev/null +++ b/vendor/github.com/coreos/go-iptables/iptables/lock.go @@ -0,0 +1,84 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "os" + "sync" + "syscall" +) + +const ( + // In earlier versions of iptables, the xtables lock was implemented + // via a Unix socket, but now flock is used via this lockfile: + // http://git.netfilter.org/iptables/commit/?id=aa562a660d1555b13cffbac1e744033e91f82707 + // Note the LSB-conforming "/run" directory does not exist on old + // distributions, so assume "/var" is symlinked + xtablesLockFilePath = "/var/run/xtables.lock" + + defaultFilePerm = 0600 +) + +type Unlocker interface { + Unlock() error +} + +type nopUnlocker struct{} + +func (_ nopUnlocker) Unlock() error { return nil } + +type fileLock struct { + // mu is used to protect against concurrent invocations from within this process + mu sync.Mutex + fd int +} + +// tryLock takes an exclusive lock on the xtables lock file without blocking. +// This is best-effort only: if the exclusive lock would block (i.e. because +// another process already holds it), no error is returned. Otherwise, any +// error encountered during the locking operation is returned. +// The returned Unlocker should be used to release the lock when the caller is +// done invoking iptables commands. +func (l *fileLock) tryLock() (Unlocker, error) { + l.mu.Lock() + err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB) + switch err { + case syscall.EWOULDBLOCK: + l.mu.Unlock() + return nopUnlocker{}, nil + case nil: + return l, nil + default: + l.mu.Unlock() + return nil, err + } +} + +// Unlock closes the underlying file, which implicitly unlocks it as well. It +// also unlocks the associated mutex. 
+func (l *fileLock) Unlock() error { + defer l.mu.Unlock() + return syscall.Close(l.fd) +} + +// newXtablesFileLock opens a new lock on the xtables lockfile without +// acquiring the lock +func newXtablesFileLock() (*fileLock, error) { + fd, err := syscall.Open(xtablesLockFilePath, os.O_CREATE, defaultFilePerm) + if err != nil { + return nil, err + } + return &fileLock{fd: fd}, nil +} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 0000000..bc52e96 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 0000000..7929947 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,145 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +type flag uintptr + +var ( + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag +) + +// flagKindMask holds the bits that make up the kind +// part of the flags field. 
In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) + +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v + } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} + +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } + } + panic("reflect.Value read-only flag has changed semantics") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 0000000..205c28d --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. 
+// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe !go1.4 + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 0000000..1be8ce9 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. 
+var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("") + maxNewlineBytes = []byte("\n") + maxShortBytes = []byte("") + circularBytes = []byte("") + circularShortBytes = []byte("") + invalidAngleBytes = []byte("") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputes the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? + switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. 
+func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. 
+ switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 0000000..2e3d22f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. + DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. 
Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) 
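Each of the wrappers above is shorthand for routing arguments through a Formatter returned by NewFormatter, so the same inline output can be produced by hand. A brief sketch with an arbitrary struct, purely for illustration:

    package main

    import (
        "fmt"

        "github.com/davecgh/go-spew/spew"
    )

    type peer struct {
        Name string
        Port int
    }

    func main() {
        p := &peer{Name: "example", Port: 51820}
        cfg := spew.NewDefaultConfig()
        // With spew's formatter, %+v also prints the pointer addresses
        // used to indirect to the final value.
        fmt.Printf("%+v\n", cfg.NewFormatter(p))
    }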
+} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 0000000..aacaac6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. 
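A per-instance ConfigState is convenient when tests need stable, diffable dumps without touching the global spew.Config. A minimal sketch with illustrative values:

    package main

    import (
        "fmt"

        "github.com/davecgh/go-spew/spew"
    )

    func main() {
        cfg := spew.ConfigState{
            Indent:                  "\t",
            SortKeys:                true, // deterministic map key ordering
            DisablePointerAddresses: true, // keep output identical across runs
        }
        fmt.Println(cfg.Sdump(map[string]int{"b": 2, "a": 1}))
    }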
+ +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. 
This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. 
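As a concrete illustration of the configuration options and convenience functions described above, here is a minimal sketch, assuming the package is imported from github.com/davecgh/go-spew/spew; the type and sample data are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	// A locally scoped ConfigState keeps these settings independent of the
	// global spew.Config used by the package-level functions.
	cfg := spew.ConfigState{
		Indent:   "\t", // tabs instead of the default single space
		MaxDepth: 3,    // stop descending after three levels
		SortKeys: true, // deterministic map ordering, handy when diffing output
	}

	data := map[string][]byte{
		"b": {0x41, 0x42, 0x43},
		"a": {0x01, 0x02, 0x03},
	}

	// Dump style: newlines, indentation, full types, lengths, and a
	// hexdump -C style rendering of the byte slices.
	cfg.Dump(data)

	// Formatter style: compact inline output via the %v family of verbs.
	fmt.Println(cfg.Sprintf("inline: %+v", data))
}
```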
+ +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 0000000..f78d89f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. 
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound: + d.w.Write(nilAngleBytes) + + case cycleFound: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. 
+ iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 0000000..b04edb7 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
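Stepping back to the package-level entry points defined above (Dump, Fdump, and Sdump): they differ only in where the output goes. A common pattern, sketched here with an illustrative helper name and assuming the usual import path, is to use Sdump for readable test failure messages:

```go
package mypkg

import (
	"reflect"
	"testing"

	"github.com/davecgh/go-spew/spew"
)

// assertEqualDump is a hypothetical test helper: when two values differ, the
// failure message contains full Sdump output for both, which is easier to
// read than %#v for deeply nested structures.
func assertEqualDump(t *testing.T, want, got interface{}) {
	t.Helper()
	if !reflect.DeepEqual(want, got) {
		t.Errorf("mismatch:\nwant:\n%sgot:\n%s", spew.Sdump(want), spew.Sdump(got))
	}
}
```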
+func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. 
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound: + f.fs.Write(nilAngleBytes) + + case cycleFound: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. + if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
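Although the convenience wrappers are the usual entry point, the value returned by NewFormatter can also be handed straight to the standard fmt package when finer control over the surrounding format string is needed. A brief sketch with illustrative variable names:

```go
package main

import (
	"fmt"
	"os"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	type inner struct{ N int }
	v := &inner{N: 7}

	// Wrap the value once and reuse the formatter with any fmt function.
	f := spew.NewFormatter(v)
	fmt.Printf("compact: %v\n", f)             // inline spew formatting
	fmt.Fprintf(os.Stderr, "typed: %#+v\n", f) // adds types and pointer addresses
}
```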
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go new file mode 100644 index 0000000..32c0e33 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. 
+func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE new file mode 100644 index 0000000..9d83342 --- /dev/null +++ b/vendor/github.com/go-kit/kit/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Peter Bourgon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/go-kit/kit/log/README.md b/vendor/github.com/go-kit/kit/log/README.md new file mode 100644 index 0000000..7222f80 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/README.md @@ -0,0 +1,147 @@ +# package log + +`package log` provides a minimal interface for structured logging in services. +It may be wrapped to encode conventions, enforce type-safety, provide leveled +logging, and so on. It can be used for both typical application log events, +and log-structured data streams. + +## Structured logging + +Structured logging is, basically, conceding to the reality that logs are +_data_, and warrant some level of schematic rigor. Using a stricter, +key/value-oriented message format for our logs, containing contextual and +semantic information, makes it much easier to get insight into the +operational activity of the systems we build. Consequently, `package log` is +of the strong belief that "[the benefits of structured logging outweigh the +minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". + +Migrating from unstructured to structured logging is probably a lot easier +than you'd expect. + +```go +// Unstructured +log.Printf("HTTP server listening on %s", addr) + +// Structured +logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") +``` + +## Usage + +### Typical application logging + +```go +w := log.NewSyncWriter(os.Stderr) +logger := log.NewLogfmtLogger(w) +logger.Log("question", "what is the meaning of life?", "answer", 42) + +// Output: +// question="what is the meaning of life?" 
answer=42 +``` + +### Contextual Loggers + +```go +func main() { + var logger log.Logger + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + logger = log.With(logger, "instance_id", 123) + + logger.Log("msg", "starting") + NewWorker(log.With(logger, "component", "worker")).Run() + NewSlacker(log.With(logger, "component", "slacker")).Run() +} + +// Output: +// instance_id=123 msg=starting +// instance_id=123 component=worker msg=running +// instance_id=123 component=slacker msg=running +``` + +### Interact with stdlib logger + +Redirect stdlib logger to Go kit logger. + +```go +import ( + "os" + stdlog "log" + kitlog "github.com/go-kit/kit/log" +) + +func main() { + logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout)) + stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) + stdlog.Print("I sure like pie") +} + +// Output: +// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} +``` + +Or, if, for legacy reasons, you need to pipe all of your logging through the +stdlib log package, you can redirect Go kit logger to the stdlib logger. + +```go +logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{}) +logger.Log("legacy", true, "msg", "at least it's something") + +// Output: +// 2016/01/01 12:34:56 legacy=true msg="at least it's something" +``` + +### Timestamps and callers + +```go +var logger log.Logger +logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) +logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) + +logger.Log("msg", "hello") + +// Output: +// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello +``` + +## Supported output formats + +- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write)) +- JSON + +## Enhancements + +`package log` is centered on the one-method Logger interface. + +```go +type Logger interface { + Log(keyvals ...interface{}) error +} +``` + +This interface, and its supporting code like is the product of much iteration +and evaluation. For more details on the evolution of the Logger interface, +see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1), +a talk by [Chris Hines](https://github.com/ChrisHines). +Also, please see +[#63](https://github.com/go-kit/kit/issues/63), +[#76](https://github.com/go-kit/kit/pull/76), +[#131](https://github.com/go-kit/kit/issues/131), +[#157](https://github.com/go-kit/kit/pull/157), +[#164](https://github.com/go-kit/kit/issues/164), and +[#252](https://github.com/go-kit/kit/pull/252) +to review historical conversations about package log and the Logger interface. + +Value-add packages and suggestions, +like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level), +are of course welcome. Good proposals should + +- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With), +- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and +- Be friendly to packages that accept only an unadorned log.Logger. + +## Benchmarks & comparisons + +There are a few Go logging benchmarks and comparisons that include Go kit's package log. 
+ +- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log +- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log diff --git a/vendor/github.com/go-kit/kit/log/doc.go b/vendor/github.com/go-kit/kit/log/doc.go new file mode 100644 index 0000000..918c0af --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/doc.go @@ -0,0 +1,116 @@ +// Package log provides a structured logger. +// +// Structured logging produces logs easily consumed later by humans or +// machines. Humans might be interested in debugging errors, or tracing +// specific requests. Machines might be interested in counting interesting +// events, or aggregating information for off-line processing. In both cases, +// it is important that the log messages are structured and actionable. +// Package log is designed to encourage both of these best practices. +// +// Basic Usage +// +// The fundamental interface is Logger. Loggers create log events from +// key/value data. The Logger interface has a single method, Log, which +// accepts a sequence of alternating key/value pairs, which this package names +// keyvals. +// +// type Logger interface { +// Log(keyvals ...interface{}) error +// } +// +// Here is an example of a function using a Logger to create log events. +// +// func RunTask(task Task, logger log.Logger) string { +// logger.Log("taskID", task.ID, "event", "starting task") +// ... +// logger.Log("taskID", task.ID, "event", "task complete") +// } +// +// The keys in the above example are "taskID" and "event". The values are +// task.ID, "starting task", and "task complete". Every key is followed +// immediately by its value. +// +// Keys are usually plain strings. Values may be any type that has a sensible +// encoding in the chosen log format. With structured logging it is a good +// idea to log simple values without formatting them. This practice allows +// the chosen logger to encode values in the most appropriate way. +// +// Contextual Loggers +// +// A contextual logger stores keyvals that it includes in all log events. +// Building appropriate contextual loggers reduces repetition and aids +// consistency in the resulting log output. With and WithPrefix add context to +// a logger. We can use With to improve the RunTask example. +// +// func RunTask(task Task, logger log.Logger) string { +// logger = log.With(logger, "taskID", task.ID) +// logger.Log("event", "starting task") +// ... +// taskHelper(task.Cmd, logger) +// ... +// logger.Log("event", "task complete") +// } +// +// The improved version emits the same log events as the original for the +// first and last calls to Log. Passing the contextual logger to taskHelper +// enables each log event created by taskHelper to include the task.ID even +// though taskHelper does not have access to that value. Using contextual +// loggers this way simplifies producing log output that enables tracing the +// life cycle of individual tasks. (See the Contextual example for the full +// code of the above snippet.) +// +// Dynamic Contextual Values +// +// A Valuer function stored in a contextual logger generates a new value each +// time an event is logged. The Valuer example demonstrates how this feature +// works. +// +// Valuers provide the basis for consistently logging timestamps and source +// code location. The log package defines several valuers for that purpose. +// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and +// DefaultCaller. 
A common logger initialization sequence that ensures all log +// entries contain a timestamp and source location looks like this: +// +// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) +// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) +// +// Concurrent Safety +// +// Applications with multiple goroutines want each log event written to the +// same logger to remain separate from other log events. Package log provides +// two simple solutions for concurrent safe logging. +// +// NewSyncWriter wraps an io.Writer and serializes each call to its Write +// method. Using a SyncWriter has the benefit that the smallest practical +// portion of the logging logic is performed within a mutex, but it requires +// the formatting Logger to make only one call to Write per log event. +// +// NewSyncLogger wraps any Logger and serializes each call to its Log method. +// Using a SyncLogger has the benefit that it guarantees each log event is +// handled atomically within the wrapped logger, but it typically serializes +// both the formatting and output logic. Use a SyncLogger if the formatting +// logger may perform multiple writes per log event. +// +// Error Handling +// +// This package relies on the practice of wrapping or decorating loggers with +// other loggers to provide composable pieces of functionality. It also means +// that Logger.Log must return an error because some +// implementations—especially those that output log data to an io.Writer—may +// encounter errors that cannot be handled locally. This in turn means that +// Loggers that wrap other loggers should return errors from the wrapped +// logger up the stack. +// +// Fortunately, the decorator pattern also provides a way to avoid the +// necessity to check for errors every time an application calls Logger.Log. +// An application required to panic whenever its Logger encounters +// an error could initialize its logger as follows. +// +// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) +// logger := log.LoggerFunc(func(keyvals ...interface{}) error { +// if err := fmtlogger.Log(keyvals...); err != nil { +// panic(err) +// } +// return nil +// }) +package log diff --git a/vendor/github.com/go-kit/kit/log/json_logger.go b/vendor/github.com/go-kit/kit/log/json_logger.go new file mode 100644 index 0000000..66094b4 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/json_logger.go @@ -0,0 +1,89 @@ +package log + +import ( + "encoding" + "encoding/json" + "fmt" + "io" + "reflect" +) + +type jsonLogger struct { + io.Writer +} + +// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a +// single JSON object. Each log event produces no more than one call to +// w.Write. The passed Writer must be safe for concurrent use by multiple +// goroutines if the returned Logger will be used concurrently. 
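For orientation, a minimal sketch of constructing a JSON logger that is safe for concurrent use, combining NewJSONLogger with the SyncWriter and contextual-logger helpers described above; the keys and values are illustrative:

```go
package main

import (
	"os"

	"github.com/go-kit/kit/log"
)

func main() {
	// Each Log call produces exactly one Write of a single JSON object, so
	// wrapping stdout in a SyncWriter makes the logger safe to share.
	logger := log.NewJSONLogger(log.NewSyncWriter(os.Stdout))
	logger = log.With(logger, "instance_id", 123)

	logger.Log("msg", "request handled", "status", 200)
	// Example output (map keys are encoded in sorted order):
	// {"instance_id":123,"msg":"request handled","status":200}
}
```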
+func NewJSONLogger(w io.Writer) Logger { + return &jsonLogger{w} +} + +func (l *jsonLogger) Log(keyvals ...interface{}) error { + n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd + m := make(map[string]interface{}, n) + for i := 0; i < len(keyvals); i += 2 { + k := keyvals[i] + var v interface{} = ErrMissingValue + if i+1 < len(keyvals) { + v = keyvals[i+1] + } + merge(m, k, v) + } + return json.NewEncoder(l.Writer).Encode(m) +} + +func merge(dst map[string]interface{}, k, v interface{}) { + var key string + switch x := k.(type) { + case string: + key = x + case fmt.Stringer: + key = safeString(x) + default: + key = fmt.Sprint(x) + } + + // We want json.Marshaler and encoding.TextMarshaller to take priority over + // err.Error() and v.String(). But json.Marshall (called later) does that by + // default so we force a no-op if it's one of those 2 case. + switch x := v.(type) { + case json.Marshaler: + case encoding.TextMarshaler: + case error: + v = safeError(x) + case fmt.Stringer: + v = safeString(x) + } + + dst[key] = v +} + +func safeString(str fmt.Stringer) (s string) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { + s = "NULL" + } else { + panic(panicVal) + } + } + }() + s = str.String() + return +} + +func safeError(err error) (s interface{}) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + s = nil + } else { + panic(panicVal) + } + } + }() + s = err.Error() + return +} diff --git a/vendor/github.com/go-kit/kit/log/level/doc.go b/vendor/github.com/go-kit/kit/log/level/doc.go new file mode 100644 index 0000000..505d307 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/level/doc.go @@ -0,0 +1,22 @@ +// Package level implements leveled logging on top of Go kit's log package. To +// use the level package, create a logger as per normal in your func main, and +// wrap it with level.NewFilter. +// +// var logger log.Logger +// logger = log.NewLogfmtLogger(os.Stderr) +// logger = level.NewFilter(logger, level.AllowInfo()) // <-- +// logger = log.With(logger, "ts", log.DefaultTimestampUTC) +// +// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error +// helper methods to emit leveled log events. +// +// logger.Log("foo", "bar") // as normal, no level +// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get()) +// if value > 100 { +// level.Error(logger).Log("value", value) +// } +// +// NewFilter allows precise control over what happens when a log event is +// emitted without a level key, or if a squelched level is used. Check the +// Option functions for details. +package level diff --git a/vendor/github.com/go-kit/kit/log/level/level.go b/vendor/github.com/go-kit/kit/log/level/level.go new file mode 100644 index 0000000..fceafc4 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/level/level.go @@ -0,0 +1,205 @@ +package level + +import "github.com/go-kit/kit/log" + +// Error returns a logger that includes a Key/ErrorValue pair. +func Error(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), ErrorValue()) +} + +// Warn returns a logger that includes a Key/WarnValue pair. +func Warn(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), WarnValue()) +} + +// Info returns a logger that includes a Key/InfoValue pair. 
+func Info(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), InfoValue()) +} + +// Debug returns a logger that includes a Key/DebugValue pair. +func Debug(logger log.Logger) log.Logger { + return log.WithPrefix(logger, Key(), DebugValue()) +} + +// NewFilter wraps next and implements level filtering. See the commentary on +// the Option functions for a detailed description of how to configure levels. +// If no options are provided, all leveled log events created with Debug, +// Info, Warn or Error helper methods are squelched and non-leveled log +// events are passed to next unmodified. +func NewFilter(next log.Logger, options ...Option) log.Logger { + l := &logger{ + next: next, + } + for _, option := range options { + option(l) + } + return l +} + +type logger struct { + next log.Logger + allowed level + squelchNoLevel bool + errNotAllowed error + errNoLevel error +} + +func (l *logger) Log(keyvals ...interface{}) error { + var hasLevel, levelAllowed bool + for i := 1; i < len(keyvals); i += 2 { + if v, ok := keyvals[i].(*levelValue); ok { + hasLevel = true + levelAllowed = l.allowed&v.level != 0 + break + } + } + if !hasLevel && l.squelchNoLevel { + return l.errNoLevel + } + if hasLevel && !levelAllowed { + return l.errNotAllowed + } + return l.next.Log(keyvals...) +} + +// Option sets a parameter for the leveled logger. +type Option func(*logger) + +// AllowAll is an alias for AllowDebug. +func AllowAll() Option { + return AllowDebug() +} + +// AllowDebug allows error, warn, info and debug level log events to pass. +func AllowDebug() Option { + return allowed(levelError | levelWarn | levelInfo | levelDebug) +} + +// AllowInfo allows error, warn and info level log events to pass. +func AllowInfo() Option { + return allowed(levelError | levelWarn | levelInfo) +} + +// AllowWarn allows error and warn level log events to pass. +func AllowWarn() Option { + return allowed(levelError | levelWarn) +} + +// AllowError allows only error level log events to pass. +func AllowError() Option { + return allowed(levelError) +} + +// AllowNone allows no leveled log events to pass. +func AllowNone() Option { + return allowed(0) +} + +func allowed(allowed level) Option { + return func(l *logger) { l.allowed = allowed } +} + +// ErrNotAllowed sets the error to return from Log when it squelches a log +// event disallowed by the configured Allow[Level] option. By default, +// ErrNotAllowed is nil; in this case the log event is squelched with no +// error. +func ErrNotAllowed(err error) Option { + return func(l *logger) { l.errNotAllowed = err } +} + +// SquelchNoLevel instructs Log to squelch log events with no level, so that +// they don't proceed through to the wrapped logger. If SquelchNoLevel is set +// to true and a log event is squelched in this way, the error value +// configured with ErrNoLevel is returned to the caller. +func SquelchNoLevel(squelch bool) Option { + return func(l *logger) { l.squelchNoLevel = squelch } +} + +// ErrNoLevel sets the error to return from Log when it squelches a log event +// with no level. By default, ErrNoLevel is nil; in this case the log event is +// squelched with no error. +func ErrNoLevel(err error) Option { + return func(l *logger) { l.errNoLevel = err } +} + +// NewInjector wraps next and returns a logger that adds a Key/level pair to +// the beginning of log events that don't already contain a level. In effect, +// this gives a default level to logs without a level. 
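Pulling the filter options above together, a minimal sketch of a leveled logger that drops debug events and rejects events carrying no level at all; the error message is illustrative:

```go
package main

import (
	"errors"
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func main() {
	var logger log.Logger
	logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

	// AllowInfo squelches debug events; SquelchNoLevel plus ErrNoLevel makes
	// un-leveled events an error for the caller instead of passing silently.
	logger = level.NewFilter(logger,
		level.AllowInfo(),
		level.SquelchNoLevel(true),
		level.ErrNoLevel(errors.New("log event has no level")),
	)

	level.Info(logger).Log("msg", "listening", "addr", ":8080") // passes the filter
	level.Debug(logger).Log("msg", "verbose detail")            // squelched
	if err := logger.Log("msg", "no level here"); err != nil {
		// err is the ErrNoLevel value configured above.
	}
}
```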
+func NewInjector(next log.Logger, level Value) log.Logger { + return &injector{ + next: next, + level: level, + } +} + +type injector struct { + next log.Logger + level interface{} +} + +func (l *injector) Log(keyvals ...interface{}) error { + for i := 1; i < len(keyvals); i += 2 { + if _, ok := keyvals[i].(*levelValue); ok { + return l.next.Log(keyvals...) + } + } + kvs := make([]interface{}, len(keyvals)+2) + kvs[0], kvs[1] = key, l.level + copy(kvs[2:], keyvals) + return l.next.Log(kvs...) +} + +// Value is the interface that each of the canonical level values implement. +// It contains unexported methods that prevent types from other packages from +// implementing it and guaranteeing that NewFilter can distinguish the levels +// defined in this package from all other values. +type Value interface { + String() string + levelVal() +} + +// Key returns the unique key added to log events by the loggers in this +// package. +func Key() interface{} { return key } + +// ErrorValue returns the unique value added to log events by Error. +func ErrorValue() Value { return errorValue } + +// WarnValue returns the unique value added to log events by Warn. +func WarnValue() Value { return warnValue } + +// InfoValue returns the unique value added to log events by Info. +func InfoValue() Value { return infoValue } + +// DebugValue returns the unique value added to log events by Warn. +func DebugValue() Value { return debugValue } + +var ( + // key is of type interface{} so that it allocates once during package + // initialization and avoids allocating every time the value is added to a + // []interface{} later. + key interface{} = "level" + + errorValue = &levelValue{level: levelError, name: "error"} + warnValue = &levelValue{level: levelWarn, name: "warn"} + infoValue = &levelValue{level: levelInfo, name: "info"} + debugValue = &levelValue{level: levelDebug, name: "debug"} +) + +type level byte + +const ( + levelDebug level = 1 << iota + levelInfo + levelWarn + levelError +) + +type levelValue struct { + name string + level +} + +func (v *levelValue) String() string { return v.name } +func (v *levelValue) levelVal() {} diff --git a/vendor/github.com/go-kit/kit/log/log.go b/vendor/github.com/go-kit/kit/log/log.go new file mode 100644 index 0000000..66a9e2f --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/log.go @@ -0,0 +1,135 @@ +package log + +import "errors" + +// Logger is the fundamental interface for all log operations. Log creates a +// log event from keyvals, a variadic sequence of alternating keys and values. +// Implementations must be safe for concurrent use by multiple goroutines. In +// particular, any implementation of Logger that appends to keyvals or +// modifies or retains any of its elements must make a copy first. +type Logger interface { + Log(keyvals ...interface{}) error +} + +// ErrMissingValue is appended to keyvals slices with odd length to substitute +// the missing value. +var ErrMissingValue = errors.New("(MISSING)") + +// With returns a new contextual logger with keyvals prepended to those passed +// to calls to Log. If logger is also a contextual logger created by With or +// WithPrefix, keyvals is appended to the existing context. +// +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. +func With(logger Logger, keyvals ...interface{}) Logger { + if len(keyvals) == 0 { + return logger + } + l := newContext(logger) + kvs := append(l.keyvals, keyvals...) 
+ if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + return &context{ + logger: l.logger, + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + keyvals: kvs[:len(kvs):len(kvs)], + hasValuer: l.hasValuer || containsValuer(keyvals), + } +} + +// WithPrefix returns a new contextual logger with keyvals prepended to those +// passed to calls to Log. If logger is also a contextual logger created by +// With or WithPrefix, keyvals is prepended to the existing context. +// +// The returned Logger replaces all value elements (odd indexes) containing a +// Valuer with their generated value for each call to its Log method. +func WithPrefix(logger Logger, keyvals ...interface{}) Logger { + if len(keyvals) == 0 { + return logger + } + l := newContext(logger) + // Limiting the capacity of the stored keyvals ensures that a new + // backing array is created if the slice must grow in Log or With. + // Using the extra capacity without copying risks a data race that + // would violate the Logger interface contract. + n := len(l.keyvals) + len(keyvals) + if len(keyvals)%2 != 0 { + n++ + } + kvs := make([]interface{}, 0, n) + kvs = append(kvs, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + kvs = append(kvs, l.keyvals...) + return &context{ + logger: l.logger, + keyvals: kvs, + hasValuer: l.hasValuer || containsValuer(keyvals), + } +} + +// context is the Logger implementation returned by With and WithPrefix. It +// wraps a Logger and holds keyvals that it includes in all log events. Its +// Log method calls bindValues to generate values for each Valuer in the +// context keyvals. +// +// A context must always have the same number of stack frames between calls to +// its Log method and the eventual binding of Valuers to their value. This +// requirement comes from the functional requirement to allow a context to +// resolve application call site information for a Caller stored in the +// context. To do this we must be able to predict the number of logging +// functions on the stack when bindValues is called. +// +// Two implementation details provide the needed stack depth consistency. +// +// 1. newContext avoids introducing an additional layer when asked to +// wrap another context. +// 2. With and WithPrefix avoid introducing an additional layer by +// returning a newly constructed context with a merged keyvals rather +// than simply wrapping the existing context. +type context struct { + logger Logger + keyvals []interface{} + hasValuer bool +} + +func newContext(logger Logger) *context { + if c, ok := logger.(*context); ok { + return c + } + return &context{logger: logger} +} + +// Log replaces all value elements (odd indexes) containing a Valuer in the +// stored context with their generated value, appends keyvals, and passes the +// result to the wrapped Logger. +func (l *context) Log(keyvals ...interface{}) error { + kvs := append(l.keyvals, keyvals...) + if len(kvs)%2 != 0 { + kvs = append(kvs, ErrMissingValue) + } + if l.hasValuer { + // If no keyvals were appended above then we must copy l.keyvals so + // that future log events will reevaluate the stored Valuers. + if len(keyvals) == 0 { + kvs = append([]interface{}{}, l.keyvals...) + } + bindValues(kvs[:len(l.keyvals)]) + } + return l.logger.Log(kvs...) 
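+	// Illustration (hypothetical call site; DefaultTimestampUTC and
+	// DefaultCaller are Valuer helpers assumed to come from this package's
+	// value.go, which is not shown here): a context built with
+	//
+	//	logger = log.With(base, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+	//
+	// has hasValuer set, so the bindValues call above re-evaluates both
+	// Valuers on every Log, yielding a fresh timestamp and the file:line of
+	// the logging call site rather than values frozen when With was called.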
+} + +// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If +// f is a function with the appropriate signature, LoggerFunc(f) is a Logger +// object that calls f. +type LoggerFunc func(...interface{}) error + +// Log implements Logger by calling f(keyvals...). +func (f LoggerFunc) Log(keyvals ...interface{}) error { + return f(keyvals...) +} diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger.go b/vendor/github.com/go-kit/kit/log/logfmt_logger.go new file mode 100644 index 0000000..a003052 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/logfmt_logger.go @@ -0,0 +1,62 @@ +package log + +import ( + "bytes" + "io" + "sync" + + "github.com/go-logfmt/logfmt" +) + +type logfmtEncoder struct { + *logfmt.Encoder + buf bytes.Buffer +} + +func (l *logfmtEncoder) Reset() { + l.Encoder.Reset() + l.buf.Reset() +} + +var logfmtEncoderPool = sync.Pool{ + New: func() interface{} { + var enc logfmtEncoder + enc.Encoder = logfmt.NewEncoder(&enc.buf) + return &enc + }, +} + +type logfmtLogger struct { + w io.Writer +} + +// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in +// logfmt format. Each log event produces no more than one call to w.Write. +// The passed Writer must be safe for concurrent use by multiple goroutines if +// the returned Logger will be used concurrently. +func NewLogfmtLogger(w io.Writer) Logger { + return &logfmtLogger{w} +} + +func (l logfmtLogger) Log(keyvals ...interface{}) error { + enc := logfmtEncoderPool.Get().(*logfmtEncoder) + enc.Reset() + defer logfmtEncoderPool.Put(enc) + + if err := enc.EncodeKeyvals(keyvals...); err != nil { + return err + } + + // Add newline to the end of the buffer + if err := enc.EndRecord(); err != nil { + return err + } + + // The Logger interface requires implementations to be safe for concurrent + // use by multiple goroutines. For this implementation that means making + // only one call to l.w.Write() for each call to Log. + if _, err := l.w.Write(enc.buf.Bytes()); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/go-kit/kit/log/nop_logger.go b/vendor/github.com/go-kit/kit/log/nop_logger.go new file mode 100644 index 0000000..1047d62 --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/nop_logger.go @@ -0,0 +1,8 @@ +package log + +type nopLogger struct{} + +// NewNopLogger returns a logger that doesn't do anything. +func NewNopLogger() Logger { return nopLogger{} } + +func (nopLogger) Log(...interface{}) error { return nil } diff --git a/vendor/github.com/go-kit/kit/log/stdlib.go b/vendor/github.com/go-kit/kit/log/stdlib.go new file mode 100644 index 0000000..ff96b5d --- /dev/null +++ b/vendor/github.com/go-kit/kit/log/stdlib.go @@ -0,0 +1,116 @@ +package log + +import ( + "io" + "log" + "regexp" + "strings" +) + +// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's +// designed to be passed to a Go kit logger as the writer, for cases where +// it's necessary to redirect all Go kit log output to the stdlib logger. +// +// If you have any choice in the matter, you shouldn't use this. Prefer to +// redirect the stdlib log to the Go kit logger via NewStdlibAdapter. +type StdlibWriter struct{} + +// Write implements io.Writer. +func (w StdlibWriter) Write(p []byte) (int, error) { + log.Print(strings.TrimSpace(string(p))) + return len(p), nil +} + +// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib +// logger's SetOutput. It will extract date/timestamps, filenames, and +// messages, and place them under relevant keys. 
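
As a usage sketch tying these pieces together (not part of the vendored file), a program could encode to logfmt, adapt a plain function with LoggerFunc, and route the standard library logger through the StdlibAdapter defined just below:

    package main

    import (
        stdlog "log"
        "os"

        "github.com/go-kit/kit/log"
    )

    func main() {
        // Everything funnels into a single logfmt encoder on stdout.
        logger := log.NewLogfmtLogger(os.Stdout)

        // LoggerFunc adapts an ordinary function to the Logger interface.
        var tagged log.Logger = log.LoggerFunc(func(keyvals ...interface{}) error {
            return logger.Log(append([]interface{}{"source", "loggerfunc"}, keyvals...)...)
        })
        tagged.Log("msg", "adapted function")

        // Redirect the stdlib logger; its date/time, file:line and message are
        // re-keyed as ts, caller and msg by the adapter's default options.
        stdlog.SetOutput(log.NewStdlibAdapter(logger))
        stdlog.SetFlags(stdlog.LstdFlags | stdlog.Lshortfile)
        stdlog.Println("hello from the standard library")
    }
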
+type StdlibAdapter struct {
+	Logger
+	timestampKey string
+	fileKey      string
+	messageKey   string
+}
+
+// StdlibAdapterOption sets a parameter for the StdlibAdapter.
+type StdlibAdapterOption func(*StdlibAdapter)
+
+// TimestampKey sets the key for the timestamp field. By default, it's "ts".
+func TimestampKey(key string) StdlibAdapterOption {
+	return func(a *StdlibAdapter) { a.timestampKey = key }
+}
+
+// FileKey sets the key for the file and line field. By default, it's "caller".
+func FileKey(key string) StdlibAdapterOption {
+	return func(a *StdlibAdapter) { a.fileKey = key }
+}
+
+// MessageKey sets the key for the actual log message. By default, it's "msg".
+func MessageKey(key string) StdlibAdapterOption {
+	return func(a *StdlibAdapter) { a.messageKey = key }
+}
+
+// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed
+// logger. It's designed to be passed to log.SetOutput.
+func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
+	a := StdlibAdapter{
+		Logger:       logger,
+		timestampKey: "ts",
+		fileKey:      "caller",
+		messageKey:   "msg",
+	}
+	for _, option := range options {
+		option(&a)
+	}
+	return a
+}
+
+func (a StdlibAdapter) Write(p []byte) (int, error) {
+	result := subexps(p)
+	keyvals := []interface{}{}
+	var timestamp string
+	if date, ok := result["date"]; ok && date != "" {
+		timestamp = date
+	}
+	if time, ok := result["time"]; ok && time != "" {
+		if timestamp != "" {
+			timestamp += " "
+		}
+		timestamp += time
+	}
+	if timestamp != "" {
+		keyvals = append(keyvals, a.timestampKey, timestamp)
+	}
+	if file, ok := result["file"]; ok && file != "" {
+		keyvals = append(keyvals, a.fileKey, file)
+	}
+	if msg, ok := result["msg"]; ok {
+		keyvals = append(keyvals, a.messageKey, msg)
+	}
+	if err := a.Logger.Log(keyvals...); err != nil {
+		return 0, err
+	}
+	return len(p), nil
+}
+
+const (
+	logRegexpDate = `(?P<date>[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?`
+	logRegexpTime = `(?P