vendor: revendor

Lucas Servén Marín 2019-05-17 00:23:05 +02:00
parent adb09ce620
commit ca70fec14f
No known key found for this signature in database
GPG Key ID: 586FEAF680DA74AD
139 changed files with 38912 additions and 0 deletions

4
go.mod

@ -39,11 +39,13 @@ require (
github.com/vishvananda/netlink v1.0.0
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 // indirect
golang.org/x/lint v0.0.0-20190409202823-959b441ac422
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 // indirect
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a // indirect
golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872
golang.org/x/text v0.3.2 // indirect
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c // indirect
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 // indirect
google.golang.org/appengine v1.5.0 // indirect
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
@ -51,6 +53,8 @@ require (
k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed
k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1
k8s.io/client-go v11.0.0+incompatible
k8s.io/code-generator v0.0.0-20190311093542-50b561225d70
k8s.io/gengo v0.0.0-20181106084056-51747d6e00da // indirect
k8s.io/klog v0.3.0 // indirect
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30
k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 // indirect

10
go.sum

@ -143,12 +143,15 @@ golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 h1:p/H982KKEjUnLJkM3tt/LemDnOc1GiZL5FCVlORJ5zo=
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo=
@ -174,6 +177,9 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e h1:FDhOuMEY4JVRztM/gsbk+IKUQ8kj74bxZrgw87eMMVc=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -198,6 +204,10 @@ k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1 h1:IS7K02iBkQXpCeieSiyJjG
k8s.io/apimachinery v0.0.0-20190313205120-d7deff9243b1/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o=
k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
k8s.io/code-generator v0.0.0-20190311093542-50b561225d70 h1:lgPp615xLHxN84RBd+viA/oHzJfI0miFYFH4T9wpPQ4=
k8s.io/code-generator v0.0.0-20190311093542-50b561225d70/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8=
k8s.io/gengo v0.0.0-20181106084056-51747d6e00da h1:ZMvcXtMVbhUCtCuiSEzBV+Eur4swzfdxx6ZyX3qT6dk=
k8s.io/gengo v0.0.0-20181106084056-51747d6e00da/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI=

19
vendor/golang.org/x/lint/.travis.yml generated vendored Normal file

@ -0,0 +1,19 @@
sudo: false
language: go
go:
  - 1.10.x
  - 1.11.x
  - master

go_import_path: golang.org/x/lint

install:
  - go get -t -v ./...

script:
  - go test -v -race ./...

matrix:
  allow_failures:
    - go: master
  fast_finish: true

15
vendor/golang.org/x/lint/CONTRIBUTING.md generated vendored Normal file

@ -0,0 +1,15 @@
# Contributing to Golint
## Before filing an issue:
### Are you having trouble building golint?
Check you have the latest version of its dependencies. Run
```
go get -u golang.org/x/lint/golint
```
If you still have problems, consider searching for existing issues before filing a new issue.
## Before sending a pull request:
Have you understood the purpose of golint? Make sure to carefully read `README`.

27
vendor/golang.org/x/lint/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2013 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

88
vendor/golang.org/x/lint/README.md generated vendored Normal file

@ -0,0 +1,88 @@
Golint is a linter for Go source code.
[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint)
## Installation
Golint requires a
[supported release of Go](https://golang.org/doc/devel/release.html#policy).
go get -u golang.org/x/lint/golint
To find out where `golint` was installed you can run `go list -f {{.Target}} golang.org/x/lint/golint`. For `golint` to be used globally add that directory to the `$PATH` environment setting.
## Usage
Invoke `golint` with one or more filenames, directories, or packages named
by its import path. Golint uses the same
[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as
the `go` command and therefore
also supports relative import paths like `./...`. Additionally the `...`
wildcard can be used as suffix on relative and absolute file paths to recurse
into them.
The output of this tool is a list of suggestions in Vim quickfix format,
which is accepted by lots of different editors.
## Purpose
Golint differs from gofmt. Gofmt reformats Go source code, whereas
golint prints out style mistakes.
Golint differs from govet. Govet is concerned with correctness, whereas
golint is concerned with coding style. Golint is in use at Google, and it
seeks to match the accepted style of the open source Go project.
The suggestions made by golint are exactly that: suggestions.
Golint is not perfect, and has both false positives and false negatives.
Do not treat its output as a gold standard. We will not be adding pragmas
or other knobs to suppress specific warnings, so do not expect or require
code to be completely "lint-free".
In short, this tool is not, and will never be, trustworthy enough for its
suggestions to be enforced automatically, for example as part of a build process.
Golint makes suggestions for many of the mechanically checkable items listed in
[Effective Go](https://golang.org/doc/effective_go.html) and the
[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments).
## Scope
Golint is meant to carry out the stylistic conventions put forth in
[Effective Go](https://golang.org/doc/effective_go.html) and
[CodeReviewComments](https://golang.org/wiki/CodeReviewComments).
Changes that are not aligned with those documents will not be considered.
## Contributions
Contributions to this project are welcome provided they are [in scope](#scope),
though please send mail before starting work on anything major.
Contributors retain their copyright, so we need you to fill out
[a short form](https://developers.google.com/open-source/cla/individual)
before we can accept your contribution.
## Vim
Add this to your ~/.vimrc:
set rtp+=$GOPATH/src/golang.org/x/lint/misc/vim
If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.
Running `:Lint` will run golint on the current file and populate the quickfix list.
Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w`
autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow
## Emacs
Add this to your `.emacs` file:
(add-to-list 'load-path (concat (getenv "GOPATH") "/src/github.com/golang/lint/misc/emacs"))
(require 'golint)
If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value.
Running M-x golint will run golint on the current file.
For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html).
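To make the quickfix-style output concrete, here is a small, hypothetical Go file (the filename and message wording below are illustrative, not taken from the README):

```go
// greet.go — a hypothetical file used only to illustrate golint's output.
package greet

func Greet(name string) string { // exported but undocumented, so golint flags it
	return "hello " + name
}
```

Running `golint greet.go` would print a quickfix-style line resembling `greet.go:4:1: exported function Greet should have comment or be unexported`, which editors that understand the quickfix format can jump to directly.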

3
vendor/golang.org/x/lint/go.mod generated vendored Normal file

@ -0,0 +1,3 @@
module golang.org/x/lint
require golang.org/x/tools v0.0.0-20190311212946-11955173bddd

6
vendor/golang.org/x/lint/go.sum generated vendored Normal file

@ -0,0 +1,6 @@
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=

159
vendor/golang.org/x/lint/golint/golint.go generated vendored Normal file

@ -0,0 +1,159 @@
// Copyright (c) 2013 The Go Authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd.
// golint lints the Go source files named on its command line.
package main
import (
"flag"
"fmt"
"go/build"
"io/ioutil"
"os"
"path/filepath"
"strings"
"golang.org/x/lint"
)
var (
minConfidence = flag.Float64("min_confidence", 0.8, "minimum confidence of a problem to print it")
setExitStatus = flag.Bool("set_exit_status", false, "set exit status to 1 if any issues are found")
suggestions int
)
func usage() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
fmt.Fprintf(os.Stderr, "\tgolint [flags] # runs on package in current directory\n")
fmt.Fprintf(os.Stderr, "\tgolint [flags] [packages]\n")
fmt.Fprintf(os.Stderr, "\tgolint [flags] [directories] # where a '/...' suffix includes all sub-directories\n")
fmt.Fprintf(os.Stderr, "\tgolint [flags] [files] # all must belong to a single package\n")
fmt.Fprintf(os.Stderr, "Flags:\n")
flag.PrintDefaults()
}
func main() {
flag.Usage = usage
flag.Parse()
if flag.NArg() == 0 {
lintDir(".")
} else {
// dirsRun, filesRun, and pkgsRun indicate whether golint is applied to
// directory, file or package targets. The distinction affects which
// checks are run. It is not valid to mix target types.
var dirsRun, filesRun, pkgsRun int
var args []string
for _, arg := range flag.Args() {
if strings.HasSuffix(arg, "/...") && isDir(arg[:len(arg)-len("/...")]) {
dirsRun = 1
for _, dirname := range allPackagesInFS(arg) {
args = append(args, dirname)
}
} else if isDir(arg) {
dirsRun = 1
args = append(args, arg)
} else if exists(arg) {
filesRun = 1
args = append(args, arg)
} else {
pkgsRun = 1
args = append(args, arg)
}
}
if dirsRun+filesRun+pkgsRun != 1 {
usage()
os.Exit(2)
}
switch {
case dirsRun == 1:
for _, dir := range args {
lintDir(dir)
}
case filesRun == 1:
lintFiles(args...)
case pkgsRun == 1:
for _, pkg := range importPaths(args) {
lintPackage(pkg)
}
}
}
if *setExitStatus && suggestions > 0 {
fmt.Fprintf(os.Stderr, "Found %d lint suggestions; failing.\n", suggestions)
os.Exit(1)
}
}
func isDir(filename string) bool {
fi, err := os.Stat(filename)
return err == nil && fi.IsDir()
}
func exists(filename string) bool {
_, err := os.Stat(filename)
return err == nil
}
func lintFiles(filenames ...string) {
files := make(map[string][]byte)
for _, filename := range filenames {
src, err := ioutil.ReadFile(filename)
if err != nil {
fmt.Fprintln(os.Stderr, err)
continue
}
files[filename] = src
}
l := new(lint.Linter)
ps, err := l.LintFiles(files)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return
}
for _, p := range ps {
if p.Confidence >= *minConfidence {
fmt.Printf("%v: %s\n", p.Position, p.Text)
suggestions++
}
}
}
func lintDir(dirname string) {
pkg, err := build.ImportDir(dirname, 0)
lintImportedPackage(pkg, err)
}
func lintPackage(pkgname string) {
pkg, err := build.Import(pkgname, ".", 0)
lintImportedPackage(pkg, err)
}
func lintImportedPackage(pkg *build.Package, err error) {
if err != nil {
if _, nogo := err.(*build.NoGoError); nogo {
// Don't complain if the failure is due to no Go source files.
return
}
fmt.Fprintln(os.Stderr, err)
return
}
var files []string
files = append(files, pkg.GoFiles...)
files = append(files, pkg.CgoFiles...)
files = append(files, pkg.TestGoFiles...)
if pkg.Dir != "." {
for i, f := range files {
files[i] = filepath.Join(pkg.Dir, f)
}
}
// TODO(dsymonds): Do foo_test too (pkg.XTestGoFiles)
lintFiles(files...)
}
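The file above is essentially a thin driver around the `lint` package. A minimal sketch of the same flow, driving `lint.Linter` directly on an in-memory file (this assumes `golang.org/x/lint`, which this commit adds to go.mod; `Lint` is the single-file counterpart of the `LintFiles` call used in `lintFiles`):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/lint"
)

func main() {
	// Lint a single in-memory file, mirroring what lintFiles does
	// with files read from disk.
	src := []byte("package demo\n\nfunc Exported() {}\n")

	l := new(lint.Linter)
	problems, err := l.Lint("demo.go", src)
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range problems {
		if p.Confidence >= 0.8 { // same default threshold as -min_confidence
			fmt.Printf("%v: %s\n", p.Position, p.Text)
		}
	}
}
```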

309
vendor/golang.org/x/lint/golint/import.go generated vendored Normal file

@ -0,0 +1,309 @@
package main
/*
This file holds a direct copy of the import path matching code of
https://github.com/golang/go/blob/master/src/cmd/go/main.go. It can be
replaced when https://golang.org/issue/8768 is resolved.
It has been updated to follow upstream changes in a few ways.
*/
import (
"fmt"
"go/build"
"log"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
)
var (
buildContext = build.Default
goroot = filepath.Clean(runtime.GOROOT())
gorootSrc = filepath.Join(goroot, "src")
)
// importPathsNoDotExpansion returns the import paths to use for the given
// command line, but it does no ... expansion.
func importPathsNoDotExpansion(args []string) []string {
if len(args) == 0 {
return []string{"."}
}
var out []string
for _, a := range args {
// Arguments are supposed to be import paths, but
// as a courtesy to Windows developers, rewrite \ to /
// in command-line arguments. Handles .\... and so on.
if filepath.Separator == '\\' {
a = strings.Replace(a, `\`, `/`, -1)
}
// Put argument in canonical form, but preserve leading ./.
if strings.HasPrefix(a, "./") {
a = "./" + path.Clean(a)
if a == "./." {
a = "."
}
} else {
a = path.Clean(a)
}
if a == "all" || a == "std" {
out = append(out, allPackages(a)...)
continue
}
out = append(out, a)
}
return out
}
// importPaths returns the import paths to use for the given command line.
func importPaths(args []string) []string {
args = importPathsNoDotExpansion(args)
var out []string
for _, a := range args {
if strings.Contains(a, "...") {
if build.IsLocalImport(a) {
out = append(out, allPackagesInFS(a)...)
} else {
out = append(out, allPackages(a)...)
}
continue
}
out = append(out, a)
}
return out
}
// matchPattern(pattern)(name) reports whether
// name matches pattern. Pattern is a limited glob
// pattern in which '...' means 'any string' and there
// is no other special syntax.
func matchPattern(pattern string) func(name string) bool {
re := regexp.QuoteMeta(pattern)
re = strings.Replace(re, `\.\.\.`, `.*`, -1)
// Special case: foo/... matches foo too.
if strings.HasSuffix(re, `/.*`) {
re = re[:len(re)-len(`/.*`)] + `(/.*)?`
}
reg := regexp.MustCompile(`^` + re + `$`)
return func(name string) bool {
return reg.MatchString(name)
}
}
// hasPathPrefix reports whether the path s begins with the
// elements in prefix.
func hasPathPrefix(s, prefix string) bool {
switch {
default:
return false
case len(s) == len(prefix):
return s == prefix
case len(s) > len(prefix):
if prefix != "" && prefix[len(prefix)-1] == '/' {
return strings.HasPrefix(s, prefix)
}
return s[len(prefix)] == '/' && s[:len(prefix)] == prefix
}
}
// treeCanMatchPattern(pattern)(name) reports whether
// name or children of name can possibly match pattern.
// Pattern is the same limited glob accepted by matchPattern.
func treeCanMatchPattern(pattern string) func(name string) bool {
wildCard := false
if i := strings.Index(pattern, "..."); i >= 0 {
wildCard = true
pattern = pattern[:i]
}
return func(name string) bool {
return len(name) <= len(pattern) && hasPathPrefix(pattern, name) ||
wildCard && strings.HasPrefix(name, pattern)
}
}
// allPackages returns all the packages that can be found
// under the $GOPATH directories and $GOROOT matching pattern.
// The pattern is either "all" (all packages), "std" (standard packages)
// or a path including "...".
func allPackages(pattern string) []string {
pkgs := matchPackages(pattern)
if len(pkgs) == 0 {
fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
}
return pkgs
}
func matchPackages(pattern string) []string {
match := func(string) bool { return true }
treeCanMatch := func(string) bool { return true }
if pattern != "all" && pattern != "std" {
match = matchPattern(pattern)
treeCanMatch = treeCanMatchPattern(pattern)
}
have := map[string]bool{
"builtin": true, // ignore pseudo-package that exists only for documentation
}
if !buildContext.CgoEnabled {
have["runtime/cgo"] = true // ignore during walk
}
var pkgs []string
// Commands
cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator)
filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error {
if err != nil || !fi.IsDir() || path == cmd {
return nil
}
name := path[len(cmd):]
if !treeCanMatch(name) {
return filepath.SkipDir
}
// Commands are all in cmd/, not in subdirectories.
if strings.Contains(name, string(filepath.Separator)) {
return filepath.SkipDir
}
// We use, e.g., cmd/gofmt as the pseudo import path for gofmt.
name = "cmd/" + name
if have[name] {
return nil
}
have[name] = true
if !match(name) {
return nil
}
_, err = buildContext.ImportDir(path, 0)
if err != nil {
if _, noGo := err.(*build.NoGoError); !noGo {
log.Print(err)
}
return nil
}
pkgs = append(pkgs, name)
return nil
})
for _, src := range buildContext.SrcDirs() {
if (pattern == "std" || pattern == "cmd") && src != gorootSrc {
continue
}
src = filepath.Clean(src) + string(filepath.Separator)
root := src
if pattern == "cmd" {
root += "cmd" + string(filepath.Separator)
}
filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
if err != nil || !fi.IsDir() || path == src {
return nil
}
// Avoid .foo, _foo, and testdata directory trees.
_, elem := filepath.Split(path)
if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
return filepath.SkipDir
}
name := filepath.ToSlash(path[len(src):])
if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") {
// The name "std" is only the standard library.
// If the name is cmd, it's the root of the command tree.
return filepath.SkipDir
}
if !treeCanMatch(name) {
return filepath.SkipDir
}
if have[name] {
return nil
}
have[name] = true
if !match(name) {
return nil
}
_, err = buildContext.ImportDir(path, 0)
if err != nil {
if _, noGo := err.(*build.NoGoError); noGo {
return nil
}
}
pkgs = append(pkgs, name)
return nil
})
}
return pkgs
}
// allPackagesInFS is like allPackages but is passed a pattern
// beginning ./ or ../, meaning it should scan the tree rooted
// at the given directory. There are ... in the pattern too.
func allPackagesInFS(pattern string) []string {
pkgs := matchPackagesInFS(pattern)
if len(pkgs) == 0 {
fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
}
return pkgs
}
func matchPackagesInFS(pattern string) []string {
// Find directory to begin the scan.
// Could be smarter but this one optimization
// is enough for now, since ... is usually at the
// end of a path.
i := strings.Index(pattern, "...")
dir, _ := path.Split(pattern[:i])
// pattern begins with ./ or ../.
// path.Clean will discard the ./ but not the ../.
// We need to preserve the ./ for pattern matching
// and in the returned import paths.
prefix := ""
if strings.HasPrefix(pattern, "./") {
prefix = "./"
}
match := matchPattern(pattern)
var pkgs []string
filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
if err != nil || !fi.IsDir() {
return nil
}
if path == dir {
// filepath.Walk starts at dir and recurses. For the recursive case,
// the path is the result of filepath.Join, which calls filepath.Clean.
// The initial case is not Cleaned, though, so we do this explicitly.
//
// This converts a path like "./io/" to "io". Without this step, running
// "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io
// package, because prepending the prefix "./" to the unclean path would
// result in "././io", and match("././io") returns false.
path = filepath.Clean(path)
}
// Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..".
_, elem := filepath.Split(path)
dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".."
if dot || strings.HasPrefix(elem, "_") || elem == "testdata" {
return filepath.SkipDir
}
name := prefix + filepath.ToSlash(path)
if !match(name) {
return nil
}
if _, err = build.ImportDir(path, 0); err != nil {
if _, noGo := err.(*build.NoGoError); !noGo {
log.Print(err)
}
return nil
}
pkgs = append(pkgs, name)
return nil
})
return pkgs
}
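`matchPattern` and `treeCanMatchPattern` are unexported, so they cannot be imported from another package; the documented semantics ("..." means any string, and `foo/...` also matches plain `foo`) can still be checked with a self-contained copy of the same construction, duplicated here purely for illustration:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same construction as matchPattern above: "..." becomes ".*",
// and a trailing "/..." is relaxed so that "foo/..." matches "foo" too.
func matchPattern(pattern string) func(name string) bool {
	re := regexp.QuoteMeta(pattern)
	re = strings.Replace(re, `\.\.\.`, `.*`, -1)
	if strings.HasSuffix(re, `/.*`) {
		re = re[:len(re)-len(`/.*`)] + `(/.*)?`
	}
	return regexp.MustCompile(`^` + re + `$`).MatchString
}

func main() {
	m := matchPattern("golang.org/x/...")
	fmt.Println(m("golang.org/x"))             // true: foo/... matches foo itself
	fmt.Println(m("golang.org/x/lint/golint")) // true: ... spans multiple segments
	fmt.Println(m("golang.org/y/lint"))        // false
}
```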

13
vendor/golang.org/x/lint/golint/importcomment.go generated vendored Normal file

@ -0,0 +1,13 @@
// Copyright (c) 2018 The Go Authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd.
// +build go1.12
// Require use of the correct import path only for Go 1.12+ users, so
// any breakages coincide with people updating their CI configs or
// whatnot.
package main // import "golang.org/x/lint/golint"

1693
vendor/golang.org/x/lint/lint.go generated vendored Normal file

File diff suppressed because it is too large

3
vendor/golang.org/x/tools/AUTHORS generated vendored Normal file

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3
vendor/golang.org/x/tools/CONTRIBUTORS generated vendored Normal file

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

27
vendor/golang.org/x/tools/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/tools/PATENTS generated vendored Normal file

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

627
vendor/golang.org/x/tools/go/ast/astutil/enclosing.go generated vendored Normal file

@ -0,0 +1,627 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package astutil
// This file defines utilities for working with source positions.
import (
"fmt"
"go/ast"
"go/token"
"sort"
)
// PathEnclosingInterval returns the node that encloses the source
// interval [start, end), and all its ancestors up to the AST root.
//
// The definition of "enclosing" used by this function considers
// additional whitespace abutting a node to be enclosed by it.
// In this example:
//
// z := x + y // add them
// <-A->
// <----B----->
//
// the ast.BinaryExpr(+) node is considered to enclose interval B
// even though its [Pos()..End()) is actually only interval A.
// This behaviour makes user interfaces more tolerant of imperfect
// input.
//
// This function treats tokens as nodes, though they are not included
// in the result. e.g. PathEnclosingInterval("+") returns the
// enclosing ast.BinaryExpr("x + y").
//
// If start==end, the 1-char interval following start is used instead.
//
// The 'exact' result is true if the interval contains only path[0]
// and perhaps some adjacent whitespace. It is false if the interval
// overlaps multiple children of path[0], or if it contains only
// interior whitespace of path[0].
// In this example:
//
// z := x + y // add them
// <--C--> <---E-->
// ^
// D
//
// intervals C, D and E are inexact. C is contained by the
// z-assignment statement, because it spans three of its children (:=,
// x, +). So too is the 1-char interval D, because it contains only
// interior whitespace of the assignment. E is considered interior
// whitespace of the BlockStmt containing the assignment.
//
// Precondition: [start, end) both lie within the same file as root.
// TODO(adonovan): return (nil, false) in this case and remove precond.
// Requires FileSet; see loader.tokenFileContainsPos.
//
// Postcondition: path is never nil; it always contains at least 'root'.
//
func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
// fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging
// Precondition: node.[Pos..End) and adjoining whitespace contain [start, end).
var visit func(node ast.Node) bool
visit = func(node ast.Node) bool {
path = append(path, node)
nodePos := node.Pos()
nodeEnd := node.End()
// fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging
// Intersect [start, end) with interval of node.
if start < nodePos {
start = nodePos
}
if end > nodeEnd {
end = nodeEnd
}
// Find sole child that contains [start, end).
children := childrenOf(node)
l := len(children)
for i, child := range children {
// [childPos, childEnd) is unaugmented interval of child.
childPos := child.Pos()
childEnd := child.End()
// [augPos, augEnd) is whitespace-augmented interval of child.
augPos := childPos
augEnd := childEnd
if i > 0 {
augPos = children[i-1].End() // start of preceding whitespace
}
if i < l-1 {
nextChildPos := children[i+1].Pos()
// Does [start, end) lie between child and next child?
if start >= augEnd && end <= nextChildPos {
return false // inexact match
}
augEnd = nextChildPos // end of following whitespace
}
// fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n",
// i, augPos, augEnd, start, end) // debugging
// Does augmented child strictly contain [start, end)?
if augPos <= start && end <= augEnd {
_, isToken := child.(tokenNode)
return isToken || visit(child)
}
// Does [start, end) overlap multiple children?
// i.e. left-augmented child contains start
// but LR-augmented child does not contain end.
if start < childEnd && end > augEnd {
break
}
}
// No single child contained [start, end),
// so node is the result. Is it exact?
// (It's tempting to put this condition before the
// child loop, but it gives the wrong result in the
// case where a node (e.g. ExprStmt) and its sole
// child have equal intervals.)
if start == nodePos && end == nodeEnd {
return true // exact match
}
return false // inexact: overlaps multiple children
}
if start > end {
start, end = end, start
}
if start < root.End() && end > root.Pos() {
if start == end {
end = start + 1 // empty interval => interval of size 1
}
exact = visit(root)
// Reverse the path:
for i, l := 0, len(path); i < l/2; i++ {
path[i], path[l-1-i] = path[l-1-i], path[i]
}
} else {
// Selection lies within whitespace preceding the
// first (or following the last) declaration in the file.
// The result nonetheless always includes the ast.File.
path = append(path, root)
}
return
}
// tokenNode is a dummy implementation of ast.Node for a single token.
// They are used transiently by PathEnclosingInterval but never escape
// this package.
//
type tokenNode struct {
pos token.Pos
end token.Pos
}
func (n tokenNode) Pos() token.Pos {
return n.pos
}
func (n tokenNode) End() token.Pos {
return n.end
}
func tok(pos token.Pos, len int) ast.Node {
return tokenNode{pos, pos + token.Pos(len)}
}
// childrenOf returns the direct non-nil children of ast.Node n.
// It may include fake ast.Node implementations for bare tokens.
// it is not safe to call (e.g.) ast.Walk on such nodes.
//
func childrenOf(n ast.Node) []ast.Node {
var children []ast.Node
// First add nodes for all true subtrees.
ast.Inspect(n, func(node ast.Node) bool {
if node == n { // push n
return true // recur
}
if node != nil { // push child
children = append(children, node)
}
return false // no recursion
})
// Then add fake Nodes for bare tokens.
switch n := n.(type) {
case *ast.ArrayType:
children = append(children,
tok(n.Lbrack, len("[")),
tok(n.Elt.End(), len("]")))
case *ast.AssignStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.BasicLit:
children = append(children,
tok(n.ValuePos, len(n.Value)))
case *ast.BinaryExpr:
children = append(children, tok(n.OpPos, len(n.Op.String())))
case *ast.BlockStmt:
children = append(children,
tok(n.Lbrace, len("{")),
tok(n.Rbrace, len("}")))
case *ast.BranchStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.CallExpr:
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
if n.Ellipsis != 0 {
children = append(children, tok(n.Ellipsis, len("...")))
}
case *ast.CaseClause:
if n.List == nil {
children = append(children,
tok(n.Case, len("default")))
} else {
children = append(children,
tok(n.Case, len("case")))
}
children = append(children, tok(n.Colon, len(":")))
case *ast.ChanType:
switch n.Dir {
case ast.RECV:
children = append(children, tok(n.Begin, len("<-chan")))
case ast.SEND:
children = append(children, tok(n.Begin, len("chan<-")))
case ast.RECV | ast.SEND:
children = append(children, tok(n.Begin, len("chan")))
}
case *ast.CommClause:
if n.Comm == nil {
children = append(children,
tok(n.Case, len("default")))
} else {
children = append(children,
tok(n.Case, len("case")))
}
children = append(children, tok(n.Colon, len(":")))
case *ast.Comment:
// nop
case *ast.CommentGroup:
// nop
case *ast.CompositeLit:
children = append(children,
tok(n.Lbrace, len("{")),
tok(n.Rbrace, len("{")))
case *ast.DeclStmt:
// nop
case *ast.DeferStmt:
children = append(children,
tok(n.Defer, len("defer")))
case *ast.Ellipsis:
children = append(children,
tok(n.Ellipsis, len("...")))
case *ast.EmptyStmt:
// nop
case *ast.ExprStmt:
// nop
case *ast.Field:
// TODO(adonovan): Field.{Doc,Comment,Tag}?
case *ast.FieldList:
children = append(children,
tok(n.Opening, len("(")),
tok(n.Closing, len(")")))
case *ast.File:
// TODO test: Doc
children = append(children,
tok(n.Package, len("package")))
case *ast.ForStmt:
children = append(children,
tok(n.For, len("for")))
case *ast.FuncDecl:
// TODO(adonovan): FuncDecl.Comment?
// Uniquely, FuncDecl breaks the invariant that
// preorder traversal yields tokens in lexical order:
// in fact, FuncDecl.Recv precedes FuncDecl.Type.Func.
//
// As a workaround, we inline the case for FuncType
// here and order things correctly.
//
children = nil // discard ast.Walk(FuncDecl) info subtrees
children = append(children, tok(n.Type.Func, len("func")))
if n.Recv != nil {
children = append(children, n.Recv)
}
children = append(children, n.Name)
if n.Type.Params != nil {
children = append(children, n.Type.Params)
}
if n.Type.Results != nil {
children = append(children, n.Type.Results)
}
if n.Body != nil {
children = append(children, n.Body)
}
case *ast.FuncLit:
// nop
case *ast.FuncType:
if n.Func != 0 {
children = append(children,
tok(n.Func, len("func")))
}
case *ast.GenDecl:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
if n.Lparen != 0 {
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
}
case *ast.GoStmt:
children = append(children,
tok(n.Go, len("go")))
case *ast.Ident:
children = append(children,
tok(n.NamePos, len(n.Name)))
case *ast.IfStmt:
children = append(children,
tok(n.If, len("if")))
case *ast.ImportSpec:
// TODO(adonovan): ImportSpec.{Doc,EndPos}?
case *ast.IncDecStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.IndexExpr:
children = append(children,
tok(n.Lbrack, len("{")),
tok(n.Rbrack, len("}")))
case *ast.InterfaceType:
children = append(children,
tok(n.Interface, len("interface")))
case *ast.KeyValueExpr:
children = append(children,
tok(n.Colon, len(":")))
case *ast.LabeledStmt:
children = append(children,
tok(n.Colon, len(":")))
case *ast.MapType:
children = append(children,
tok(n.Map, len("map")))
case *ast.ParenExpr:
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
case *ast.RangeStmt:
children = append(children,
tok(n.For, len("for")),
tok(n.TokPos, len(n.Tok.String())))
case *ast.ReturnStmt:
children = append(children,
tok(n.Return, len("return")))
case *ast.SelectStmt:
children = append(children,
tok(n.Select, len("select")))
case *ast.SelectorExpr:
// nop
case *ast.SendStmt:
children = append(children,
tok(n.Arrow, len("<-")))
case *ast.SliceExpr:
children = append(children,
tok(n.Lbrack, len("[")),
tok(n.Rbrack, len("]")))
case *ast.StarExpr:
children = append(children, tok(n.Star, len("*")))
case *ast.StructType:
children = append(children, tok(n.Struct, len("struct")))
case *ast.SwitchStmt:
children = append(children, tok(n.Switch, len("switch")))
case *ast.TypeAssertExpr:
children = append(children,
tok(n.Lparen-1, len(".")),
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
case *ast.TypeSpec:
// TODO(adonovan): TypeSpec.{Doc,Comment}?
case *ast.TypeSwitchStmt:
children = append(children, tok(n.Switch, len("switch")))
case *ast.UnaryExpr:
children = append(children, tok(n.OpPos, len(n.Op.String())))
case *ast.ValueSpec:
// TODO(adonovan): ValueSpec.{Doc,Comment}?
case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
// nop
}
// TODO(adonovan): opt: merge the logic of ast.Inspect() into
// the switch above so we can make interleaved callbacks for
// both Nodes and Tokens in the right order and avoid the need
// to sort.
sort.Sort(byPos(children))
return children
}
type byPos []ast.Node
func (sl byPos) Len() int {
return len(sl)
}
func (sl byPos) Less(i, j int) bool {
return sl[i].Pos() < sl[j].Pos()
}
func (sl byPos) Swap(i, j int) {
sl[i], sl[j] = sl[j], sl[i]
}
// NodeDescription returns a description of the concrete type of n suitable
// for a user interface.
//
// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident,
// StarExpr) we could be much more specific given the path to the AST
// root. Perhaps we should do that.
//
func NodeDescription(n ast.Node) string {
switch n := n.(type) {
case *ast.ArrayType:
return "array type"
case *ast.AssignStmt:
return "assignment"
case *ast.BadDecl:
return "bad declaration"
case *ast.BadExpr:
return "bad expression"
case *ast.BadStmt:
return "bad statement"
case *ast.BasicLit:
return "basic literal"
case *ast.BinaryExpr:
return fmt.Sprintf("binary %s operation", n.Op)
case *ast.BlockStmt:
return "block"
case *ast.BranchStmt:
switch n.Tok {
case token.BREAK:
return "break statement"
case token.CONTINUE:
return "continue statement"
case token.GOTO:
return "goto statement"
case token.FALLTHROUGH:
return "fall-through statement"
}
case *ast.CallExpr:
if len(n.Args) == 1 && !n.Ellipsis.IsValid() {
return "function call (or conversion)"
}
return "function call"
case *ast.CaseClause:
return "case clause"
case *ast.ChanType:
return "channel type"
case *ast.CommClause:
return "communication clause"
case *ast.Comment:
return "comment"
case *ast.CommentGroup:
return "comment group"
case *ast.CompositeLit:
return "composite literal"
case *ast.DeclStmt:
return NodeDescription(n.Decl) + " statement"
case *ast.DeferStmt:
return "defer statement"
case *ast.Ellipsis:
return "ellipsis"
case *ast.EmptyStmt:
return "empty statement"
case *ast.ExprStmt:
return "expression statement"
case *ast.Field:
// Can be any of these:
// struct {x, y int} -- struct field(s)
// struct {T} -- anon struct field
// interface {I} -- interface embedding
// interface {f()} -- interface method
// func (A) func(B) C -- receiver, param(s), result(s)
return "field/method/parameter"
case *ast.FieldList:
return "field/method/parameter list"
case *ast.File:
return "source file"
case *ast.ForStmt:
return "for loop"
case *ast.FuncDecl:
return "function declaration"
case *ast.FuncLit:
return "function literal"
case *ast.FuncType:
return "function type"
case *ast.GenDecl:
switch n.Tok {
case token.IMPORT:
return "import declaration"
case token.CONST:
return "constant declaration"
case token.TYPE:
return "type declaration"
case token.VAR:
return "variable declaration"
}
case *ast.GoStmt:
return "go statement"
case *ast.Ident:
return "identifier"
case *ast.IfStmt:
return "if statement"
case *ast.ImportSpec:
return "import specification"
case *ast.IncDecStmt:
if n.Tok == token.INC {
return "increment statement"
}
return "decrement statement"
case *ast.IndexExpr:
return "index expression"
case *ast.InterfaceType:
return "interface type"
case *ast.KeyValueExpr:
return "key/value association"
case *ast.LabeledStmt:
return "statement label"
case *ast.MapType:
return "map type"
case *ast.Package:
return "package"
case *ast.ParenExpr:
return "parenthesized " + NodeDescription(n.X)
case *ast.RangeStmt:
return "range loop"
case *ast.ReturnStmt:
return "return statement"
case *ast.SelectStmt:
return "select statement"
case *ast.SelectorExpr:
return "selector"
case *ast.SendStmt:
return "channel send"
case *ast.SliceExpr:
return "slice expression"
case *ast.StarExpr:
return "*-operation" // load/store expr or pointer type
case *ast.StructType:
return "struct type"
case *ast.SwitchStmt:
return "switch statement"
case *ast.TypeAssertExpr:
return "type assertion"
case *ast.TypeSpec:
return "type specification"
case *ast.TypeSwitchStmt:
return "type switch"
case *ast.UnaryExpr:
return fmt.Sprintf("unary %s operation", n.Op)
case *ast.ValueSpec:
return "value specification"
}
panic(fmt.Sprintf("unexpected node type: %T", n))
}
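Both `PathEnclosingInterval` and `NodeDescription` are exported, so a short sketch of using them together may help; the toy source and the `x + y` interval below are made up for illustration:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"log"
	"strings"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	const src = `package p

func add(x, y int) int { return x + y }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}

	// Convert the byte offset of "x + y" into token.Pos values.
	tf := fset.File(f.Package)
	off := strings.Index(src, "x + y")
	start, end := tf.Pos(off), tf.Pos(off+len("x + y"))

	path, exact := astutil.PathEnclosingInterval(f, start, end)
	fmt.Println("exact:", exact) // true: the interval coincides with the x+y BinaryExpr
	for _, n := range path {     // innermost node first, *ast.File last
		fmt.Println(astutil.NodeDescription(n))
	}
}
```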

481
vendor/golang.org/x/tools/go/ast/astutil/imports.go generated vendored Normal file

@ -0,0 +1,481 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package astutil contains common utilities for working with the Go AST.
package astutil // import "golang.org/x/tools/go/ast/astutil"
import (
"fmt"
"go/ast"
"go/token"
"strconv"
"strings"
)
// AddImport adds the import path to the file f, if absent.
func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) {
return AddNamedImport(fset, f, "", path)
}
// AddNamedImport adds the import with the given name and path to the file f, if absent.
// If name is not empty, it is used to rename the import.
//
// For example, calling
// AddNamedImport(fset, f, "pathpkg", "path")
// adds
// import pathpkg "path"
func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) {
if imports(f, name, path) {
return false
}
newImport := &ast.ImportSpec{
Path: &ast.BasicLit{
Kind: token.STRING,
Value: strconv.Quote(path),
},
}
if name != "" {
newImport.Name = &ast.Ident{Name: name}
}
// Find an import decl to add to.
// The goal is to find an existing import
// whose import path has the longest shared
// prefix with path.
var (
bestMatch = -1 // length of longest shared prefix
lastImport = -1 // index in f.Decls of the file's final import decl
impDecl *ast.GenDecl // import decl containing the best match
impIndex = -1 // spec index in impDecl containing the best match
isThirdPartyPath = isThirdParty(path)
)
for i, decl := range f.Decls {
gen, ok := decl.(*ast.GenDecl)
if ok && gen.Tok == token.IMPORT {
lastImport = i
// Do not add to import "C", to avoid disrupting the
// association with its doc comment, breaking cgo.
if declImports(gen, "C") {
continue
}
// Match an empty import decl if that's all that is available.
if len(gen.Specs) == 0 && bestMatch == -1 {
impDecl = gen
}
// Compute longest shared prefix with imports in this group and find best
// matched import spec.
// 1. Always prefer import spec with longest shared prefix.
// 2. While match length is 0,
// - for stdlib package: prefer first import spec.
// - for third party package: prefer first third party import spec.
// We cannot use last import spec as best match for third party package
// because grouped imports are usually placed last by goimports -local
// flag.
// See issue #19190.
seenAnyThirdParty := false
for j, spec := range gen.Specs {
impspec := spec.(*ast.ImportSpec)
p := importPath(impspec)
n := matchLen(p, path)
if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) {
bestMatch = n
impDecl = gen
impIndex = j
}
seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p)
}
}
}
// If no import decl found, add one after the last import.
if impDecl == nil {
impDecl = &ast.GenDecl{
Tok: token.IMPORT,
}
if lastImport >= 0 {
impDecl.TokPos = f.Decls[lastImport].End()
} else {
// There are no existing imports.
// Our new import, preceded by a blank line, goes after the package declaration
// and after the comment, if any, that starts on the same line as the
// package declaration.
impDecl.TokPos = f.Package
file := fset.File(f.Package)
pkgLine := file.Line(f.Package)
for _, c := range f.Comments {
if file.Line(c.Pos()) > pkgLine {
break
}
// +2 for a blank line
impDecl.TokPos = c.End() + 2
}
}
f.Decls = append(f.Decls, nil)
copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
f.Decls[lastImport+1] = impDecl
}
// Insert new import at insertAt.
insertAt := 0
if impIndex >= 0 {
// insert after the found import
insertAt = impIndex + 1
}
impDecl.Specs = append(impDecl.Specs, nil)
copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
impDecl.Specs[insertAt] = newImport
pos := impDecl.Pos()
if insertAt > 0 {
// If there is a comment after an existing import, preserve the comment
// position by adding the new import after the comment.
if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil {
pos = spec.Comment.End()
} else {
// Assign same position as the previous import,
// so that the sorter sees it as being in the same block.
pos = impDecl.Specs[insertAt-1].Pos()
}
}
if newImport.Name != nil {
newImport.Name.NamePos = pos
}
newImport.Path.ValuePos = pos
newImport.EndPos = pos
// Clean up parens. impDecl contains at least one spec.
if len(impDecl.Specs) == 1 {
// Remove unneeded parens.
impDecl.Lparen = token.NoPos
} else if !impDecl.Lparen.IsValid() {
// impDecl needs parens added.
impDecl.Lparen = impDecl.Specs[0].Pos()
}
f.Imports = append(f.Imports, newImport)
if len(f.Decls) <= 1 {
return true
}
// Merge all the import declarations into the first one.
var first *ast.GenDecl
for i := 0; i < len(f.Decls); i++ {
decl := f.Decls[i]
gen, ok := decl.(*ast.GenDecl)
if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
continue
}
if first == nil {
first = gen
continue // Don't touch the first one.
}
// We now know there is more than one package in this import
// declaration. Ensure that it ends up parenthesized.
first.Lparen = first.Pos()
// Move the imports of the other import declaration to the first one.
for _, spec := range gen.Specs {
spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
first.Specs = append(first.Specs, spec)
}
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
i--
}
return true
}
func isThirdParty(importPath string) bool {
// Third party package import path usually contains "." (".com", ".org", ...)
// This logic is taken from golang.org/x/tools/imports package.
return strings.Contains(importPath, ".")
}
// DeleteImport deletes the import path from the file f, if present.
// If there are duplicate import declarations, all matching ones are deleted.
func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
return DeleteNamedImport(fset, f, "", path)
}
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
// If there are duplicate import declarations, all matching ones are deleted.
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
var delspecs []*ast.ImportSpec
var delcomments []*ast.CommentGroup
// Find the import nodes that import path, if any.
for i := 0; i < len(f.Decls); i++ {
decl := f.Decls[i]
gen, ok := decl.(*ast.GenDecl)
if !ok || gen.Tok != token.IMPORT {
continue
}
for j := 0; j < len(gen.Specs); j++ {
spec := gen.Specs[j]
impspec := spec.(*ast.ImportSpec)
if importName(impspec) != name || importPath(impspec) != path {
continue
}
// We found an import spec that imports path.
// Delete it.
delspecs = append(delspecs, impspec)
deleted = true
copy(gen.Specs[j:], gen.Specs[j+1:])
gen.Specs = gen.Specs[:len(gen.Specs)-1]
// If this was the last import spec in this decl,
// delete the decl, too.
if len(gen.Specs) == 0 {
copy(f.Decls[i:], f.Decls[i+1:])
f.Decls = f.Decls[:len(f.Decls)-1]
i--
break
} else if len(gen.Specs) == 1 {
if impspec.Doc != nil {
delcomments = append(delcomments, impspec.Doc)
}
if impspec.Comment != nil {
delcomments = append(delcomments, impspec.Comment)
}
for _, cg := range f.Comments {
// Found comment on the same line as the import spec.
if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line {
delcomments = append(delcomments, cg)
break
}
}
spec := gen.Specs[0].(*ast.ImportSpec)
// Move the documentation right after the import decl.
if spec.Doc != nil {
for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line {
fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
}
}
for _, cg := range f.Comments {
if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line {
for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line {
fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
}
break
}
}
}
if j > 0 {
lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
line := fset.Position(impspec.Path.ValuePos).Line
// We deleted an entry but now there may be
// a blank line-sized hole where the import was.
if line-lastLine > 1 {
// There was a blank line immediately preceding the deleted import,
// so there's no need to close the hole.
// Do nothing.
} else if line != fset.File(gen.Rparen).LineCount() {
// There was no blank line. Close the hole.
fset.File(gen.Rparen).MergeLine(line)
}
}
j--
}
}
// Delete imports from f.Imports.
for i := 0; i < len(f.Imports); i++ {
imp := f.Imports[i]
for j, del := range delspecs {
if imp == del {
copy(f.Imports[i:], f.Imports[i+1:])
f.Imports = f.Imports[:len(f.Imports)-1]
copy(delspecs[j:], delspecs[j+1:])
delspecs = delspecs[:len(delspecs)-1]
i--
break
}
}
}
// Delete comments from f.Comments.
for i := 0; i < len(f.Comments); i++ {
cg := f.Comments[i]
for j, del := range delcomments {
if cg == del {
copy(f.Comments[i:], f.Comments[i+1:])
f.Comments = f.Comments[:len(f.Comments)-1]
copy(delcomments[j:], delcomments[j+1:])
delcomments = delcomments[:len(delcomments)-1]
i--
break
}
}
}
if len(delspecs) > 0 {
panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
}
return
}
// RewriteImport rewrites any import of path oldPath to path newPath.
func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) {
for _, imp := range f.Imports {
if importPath(imp) == oldPath {
rewrote = true
// record old End, because the default is to compute
// it using the length of imp.Path.Value.
imp.EndPos = imp.End()
imp.Path.Value = strconv.Quote(newPath)
}
}
return
}
// UsesImport reports whether a given import is used.
func UsesImport(f *ast.File, path string) (used bool) {
spec := importSpec(f, path)
if spec == nil {
return
}
name := spec.Name.String()
switch name {
case "<nil>":
// If the package name is not explicitly specified,
// make an educated guess. This is not guaranteed to be correct.
lastSlash := strings.LastIndex(path, "/")
if lastSlash == -1 {
name = path
} else {
name = path[lastSlash+1:]
}
case "_", ".":
// Not sure if this import is used - err on the side of caution.
return true
}
ast.Walk(visitFn(func(n ast.Node) {
sel, ok := n.(*ast.SelectorExpr)
if ok && isTopName(sel.X, name) {
used = true
}
}), f)
return
}
type visitFn func(node ast.Node)
func (fn visitFn) Visit(node ast.Node) ast.Visitor {
fn(node)
return fn
}
// imports reports whether f has an import with the specified name and path.
func imports(f *ast.File, name, path string) bool {
for _, s := range f.Imports {
if importName(s) == name && importPath(s) == path {
return true
}
}
return false
}
// importSpec returns the import spec if f imports path,
// or nil otherwise.
func importSpec(f *ast.File, path string) *ast.ImportSpec {
for _, s := range f.Imports {
if importPath(s) == path {
return s
}
}
return nil
}
// importName returns the name of s,
// or "" if the import is not named.
func importName(s *ast.ImportSpec) string {
if s.Name == nil {
return ""
}
return s.Name.Name
}
// importPath returns the unquoted import path of s,
// or "" if the path is not properly quoted.
func importPath(s *ast.ImportSpec) string {
t, err := strconv.Unquote(s.Path.Value)
if err != nil {
return ""
}
return t
}
// declImports reports whether gen contains an import of path.
func declImports(gen *ast.GenDecl, path string) bool {
if gen.Tok != token.IMPORT {
return false
}
for _, spec := range gen.Specs {
impspec := spec.(*ast.ImportSpec)
if importPath(impspec) == path {
return true
}
}
return false
}
// matchLen returns the length of the longest path segment prefix shared by x and y.
func matchLen(x, y string) int {
n := 0
for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ {
if x[i] == '/' {
n++
}
}
return n
}
// isTopName returns true if n is a top-level unresolved identifier with the given name.
func isTopName(n ast.Expr, name string) bool {
id, ok := n.(*ast.Ident)
return ok && id.Name == name && id.Obj == nil
}
// Imports returns the file imports grouped by paragraph.
func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec {
var groups [][]*ast.ImportSpec
for _, decl := range f.Decls {
genDecl, ok := decl.(*ast.GenDecl)
if !ok || genDecl.Tok != token.IMPORT {
break
}
group := []*ast.ImportSpec{}
var lastLine int
for _, spec := range genDecl.Specs {
importSpec := spec.(*ast.ImportSpec)
pos := importSpec.Path.ValuePos
line := fset.Position(pos).Line
if lastLine > 0 && pos > 0 && line-lastLine > 1 {
groups = append(groups, group)
group = []*ast.ImportSpec{}
}
group = append(group, importSpec)
lastLine = line
}
groups = append(groups, group)
}
return groups
}
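A minimal sketch of `AddNamedImport` in use, following the `pathpkg "path"` example from its doc comment; the input source is made up, and `go/format` is used only to print the rewritten AST:

```go
package main

import (
	"go/format"
	"go/parser"
	"go/token"
	"log"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	const src = `package p

import "fmt"

func hello() { fmt.Println("hi") }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}

	// Adds `pathpkg "path"` to the existing import decl,
	// which gains parentheses now that it holds two specs.
	if astutil.AddNamedImport(fset, f, "pathpkg", "path") {
		if err := format.Node(os.Stdout, fset, f); err != nil {
			log.Fatal(err)
		}
	}
}
```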

477
vendor/golang.org/x/tools/go/ast/astutil/rewrite.go generated vendored Normal file

@ -0,0 +1,477 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package astutil
import (
"fmt"
"go/ast"
"reflect"
"sort"
)
// An ApplyFunc is invoked by Apply for each node n, even if n is nil,
// before and/or after the node's children, using a Cursor describing
// the current node and providing operations on it.
//
// The return value of ApplyFunc controls the syntax tree traversal.
// See Apply for details.
type ApplyFunc func(*Cursor) bool
// Apply traverses a syntax tree recursively, starting with root,
// and calling pre and post for each node as described below.
// Apply returns the syntax tree, possibly modified.
//
// If pre is not nil, it is called for each node before the node's
// children are traversed (pre-order). If pre returns false, no
// children are traversed, and post is not called for that node.
//
// If post is not nil, and a prior call of pre didn't return false,
// post is called for each node after its children are traversed
// (post-order). If post returns false, traversal is terminated and
// Apply returns immediately.
//
// Only fields that refer to AST nodes are considered children;
// i.e., token.Pos, Scopes, Objects, and fields of basic types
// (strings, etc.) are ignored.
//
// Children are traversed in the order in which they appear in the
// respective node's struct definition. A package's files are
// traversed in the filenames' alphabetical order.
//
func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) {
parent := &struct{ ast.Node }{root}
defer func() {
if r := recover(); r != nil && r != abort {
panic(r)
}
result = parent.Node
}()
a := &application{pre: pre, post: post}
a.apply(parent, "Node", nil, root)
return
}
var abort = new(int) // singleton, to signal termination of Apply
// A Cursor describes a node encountered during Apply.
// Information about the node and its parent is available
// from the Node, Parent, Name, and Index methods.
//
// If p is a variable with the type and value of the current parent node
// c.Parent(), and f is the field identifier with name c.Name(),
// the following invariants hold:
//
// p.f == c.Node() if c.Index() < 0
// p.f[c.Index()] == c.Node() if c.Index() >= 0
//
// The methods Replace, Delete, InsertBefore, and InsertAfter
// can be used to change the AST without disrupting Apply.
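// For example, while Apply visits the Fun field of an *ast.CallExpr call,
// c.Parent() == call, c.Name() == "Fun", c.Index() == -1, and c.Node() == call.Fun;
// while it visits the i'th element of call.Args, c.Name() == "Args" and c.Index() == i.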
type Cursor struct {
parent ast.Node
name string
iter *iterator // valid if non-nil
node ast.Node
}
// Node returns the current Node.
func (c *Cursor) Node() ast.Node { return c.node }
// Parent returns the parent of the current Node.
func (c *Cursor) Parent() ast.Node { return c.parent }
// Name returns the name of the parent Node field that contains the current Node.
// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns
// the filename for the current Node.
func (c *Cursor) Name() string { return c.name }
// Index reports the index >= 0 of the current Node in the slice of Nodes that
// contains it, or a value < 0 if the current Node is not part of a slice.
// The index of the current node changes if InsertBefore is called while
// processing the current node.
func (c *Cursor) Index() int {
if c.iter != nil {
return c.iter.index
}
return -1
}
// field returns the current node's parent field value.
func (c *Cursor) field() reflect.Value {
return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name)
}
// Replace replaces the current Node with n.
// The replacement node is not walked by Apply.
func (c *Cursor) Replace(n ast.Node) {
if _, ok := c.node.(*ast.File); ok {
file, ok := n.(*ast.File)
if !ok {
panic("attempt to replace *ast.File with non-*ast.File")
}
c.parent.(*ast.Package).Files[c.name] = file
return
}
v := c.field()
if i := c.Index(); i >= 0 {
v = v.Index(i)
}
v.Set(reflect.ValueOf(n))
}
// Delete deletes the current Node from its containing slice.
// If the current Node is not part of a slice, Delete panics.
// As a special case, if the current node is a package file,
// Delete removes it from the package's Files map.
func (c *Cursor) Delete() {
if _, ok := c.node.(*ast.File); ok {
delete(c.parent.(*ast.Package).Files, c.name)
return
}
i := c.Index()
if i < 0 {
panic("Delete node not contained in slice")
}
v := c.field()
l := v.Len()
reflect.Copy(v.Slice(i, l), v.Slice(i+1, l))
v.Index(l - 1).Set(reflect.Zero(v.Type().Elem()))
v.SetLen(l - 1)
c.iter.step--
}
// InsertAfter inserts n after the current Node in its containing slice.
// If the current Node is not part of a slice, InsertAfter panics.
// Apply does not walk n.
func (c *Cursor) InsertAfter(n ast.Node) {
i := c.Index()
if i < 0 {
panic("InsertAfter node not contained in slice")
}
v := c.field()
v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
l := v.Len()
reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l))
v.Index(i + 1).Set(reflect.ValueOf(n))
c.iter.step++
}
// InsertBefore inserts n before the current Node in its containing slice.
// If the current Node is not part of a slice, InsertBefore panics.
// Apply will not walk n.
func (c *Cursor) InsertBefore(n ast.Node) {
i := c.Index()
if i < 0 {
panic("InsertBefore node not contained in slice")
}
v := c.field()
v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
l := v.Len()
reflect.Copy(v.Slice(i+1, l), v.Slice(i, l))
v.Index(i).Set(reflect.ValueOf(n))
c.iter.index++
}
// application carries all the shared data so we can pass it around cheaply.
type application struct {
pre, post ApplyFunc
cursor Cursor
iter iterator
}
func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) {
// convert typed nil into untyped nil
if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() {
n = nil
}
// avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead
saved := a.cursor
a.cursor.parent = parent
a.cursor.name = name
a.cursor.iter = iter
a.cursor.node = n
if a.pre != nil && !a.pre(&a.cursor) {
a.cursor = saved
return
}
// walk children
// (the order of the cases matches the order of the corresponding node types in go/ast)
switch n := n.(type) {
case nil:
// nothing to do
// Comments and fields
case *ast.Comment:
// nothing to do
case *ast.CommentGroup:
if n != nil {
a.applyList(n, "List")
}
case *ast.Field:
a.apply(n, "Doc", nil, n.Doc)
a.applyList(n, "Names")
a.apply(n, "Type", nil, n.Type)
a.apply(n, "Tag", nil, n.Tag)
a.apply(n, "Comment", nil, n.Comment)
case *ast.FieldList:
a.applyList(n, "List")
// Expressions
case *ast.BadExpr, *ast.Ident, *ast.BasicLit:
// nothing to do
case *ast.Ellipsis:
a.apply(n, "Elt", nil, n.Elt)
case *ast.FuncLit:
a.apply(n, "Type", nil, n.Type)
a.apply(n, "Body", nil, n.Body)
case *ast.CompositeLit:
a.apply(n, "Type", nil, n.Type)
a.applyList(n, "Elts")
case *ast.ParenExpr:
a.apply(n, "X", nil, n.X)
case *ast.SelectorExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Sel", nil, n.Sel)
case *ast.IndexExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Index", nil, n.Index)
case *ast.SliceExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Low", nil, n.Low)
a.apply(n, "High", nil, n.High)
a.apply(n, "Max", nil, n.Max)
case *ast.TypeAssertExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Type", nil, n.Type)
case *ast.CallExpr:
a.apply(n, "Fun", nil, n.Fun)
a.applyList(n, "Args")
case *ast.StarExpr:
a.apply(n, "X", nil, n.X)
case *ast.UnaryExpr:
a.apply(n, "X", nil, n.X)
case *ast.BinaryExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Y", nil, n.Y)
case *ast.KeyValueExpr:
a.apply(n, "Key", nil, n.Key)
a.apply(n, "Value", nil, n.Value)
// Types
case *ast.ArrayType:
a.apply(n, "Len", nil, n.Len)
a.apply(n, "Elt", nil, n.Elt)
case *ast.StructType:
a.apply(n, "Fields", nil, n.Fields)
case *ast.FuncType:
a.apply(n, "Params", nil, n.Params)
a.apply(n, "Results", nil, n.Results)
case *ast.InterfaceType:
a.apply(n, "Methods", nil, n.Methods)
case *ast.MapType:
a.apply(n, "Key", nil, n.Key)
a.apply(n, "Value", nil, n.Value)
case *ast.ChanType:
a.apply(n, "Value", nil, n.Value)
// Statements
case *ast.BadStmt:
// nothing to do
case *ast.DeclStmt:
a.apply(n, "Decl", nil, n.Decl)
case *ast.EmptyStmt:
// nothing to do
case *ast.LabeledStmt:
a.apply(n, "Label", nil, n.Label)
a.apply(n, "Stmt", nil, n.Stmt)
case *ast.ExprStmt:
a.apply(n, "X", nil, n.X)
case *ast.SendStmt:
a.apply(n, "Chan", nil, n.Chan)
a.apply(n, "Value", nil, n.Value)
case *ast.IncDecStmt:
a.apply(n, "X", nil, n.X)
case *ast.AssignStmt:
a.applyList(n, "Lhs")
a.applyList(n, "Rhs")
case *ast.GoStmt:
a.apply(n, "Call", nil, n.Call)
case *ast.DeferStmt:
a.apply(n, "Call", nil, n.Call)
case *ast.ReturnStmt:
a.applyList(n, "Results")
case *ast.BranchStmt:
a.apply(n, "Label", nil, n.Label)
case *ast.BlockStmt:
a.applyList(n, "List")
case *ast.IfStmt:
a.apply(n, "Init", nil, n.Init)
a.apply(n, "Cond", nil, n.Cond)
a.apply(n, "Body", nil, n.Body)
a.apply(n, "Else", nil, n.Else)
case *ast.CaseClause:
a.applyList(n, "List")
a.applyList(n, "Body")
case *ast.SwitchStmt:
a.apply(n, "Init", nil, n.Init)
a.apply(n, "Tag", nil, n.Tag)
a.apply(n, "Body", nil, n.Body)
case *ast.TypeSwitchStmt:
a.apply(n, "Init", nil, n.Init)
a.apply(n, "Assign", nil, n.Assign)
a.apply(n, "Body", nil, n.Body)
case *ast.CommClause:
a.apply(n, "Comm", nil, n.Comm)
a.applyList(n, "Body")
case *ast.SelectStmt:
a.apply(n, "Body", nil, n.Body)
case *ast.ForStmt:
a.apply(n, "Init", nil, n.Init)
a.apply(n, "Cond", nil, n.Cond)
a.apply(n, "Post", nil, n.Post)
a.apply(n, "Body", nil, n.Body)
case *ast.RangeStmt:
a.apply(n, "Key", nil, n.Key)
a.apply(n, "Value", nil, n.Value)
a.apply(n, "X", nil, n.X)
a.apply(n, "Body", nil, n.Body)
// Declarations
case *ast.ImportSpec:
a.apply(n, "Doc", nil, n.Doc)
a.apply(n, "Name", nil, n.Name)
a.apply(n, "Path", nil, n.Path)
a.apply(n, "Comment", nil, n.Comment)
case *ast.ValueSpec:
a.apply(n, "Doc", nil, n.Doc)
a.applyList(n, "Names")
a.apply(n, "Type", nil, n.Type)
a.applyList(n, "Values")
a.apply(n, "Comment", nil, n.Comment)
case *ast.TypeSpec:
a.apply(n, "Doc", nil, n.Doc)
a.apply(n, "Name", nil, n.Name)
a.apply(n, "Type", nil, n.Type)
a.apply(n, "Comment", nil, n.Comment)
case *ast.BadDecl:
// nothing to do
case *ast.GenDecl:
a.apply(n, "Doc", nil, n.Doc)
a.applyList(n, "Specs")
case *ast.FuncDecl:
a.apply(n, "Doc", nil, n.Doc)
a.apply(n, "Recv", nil, n.Recv)
a.apply(n, "Name", nil, n.Name)
a.apply(n, "Type", nil, n.Type)
a.apply(n, "Body", nil, n.Body)
// Files and packages
case *ast.File:
a.apply(n, "Doc", nil, n.Doc)
a.apply(n, "Name", nil, n.Name)
a.applyList(n, "Decls")
// Don't walk n.Comments; they have either been walked already if
// they are Doc comments, or they can be easily walked explicitly.
case *ast.Package:
// collect and sort names for reproducible behavior
var names []string
for name := range n.Files {
names = append(names, name)
}
sort.Strings(names)
for _, name := range names {
a.apply(n, name, nil, n.Files[name])
}
default:
panic(fmt.Sprintf("Apply: unexpected node type %T", n))
}
if a.post != nil && !a.post(&a.cursor) {
panic(abort)
}
a.cursor = saved
}
// An iterator controls iteration over a slice of nodes.
type iterator struct {
index, step int
}
func (a *application) applyList(parent ast.Node, name string) {
// avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead
saved := a.iter
a.iter.index = 0
for {
// must reload parent.name each time, since cursor modifications might change it
v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name)
if a.iter.index >= v.Len() {
break
}
// element x may be nil in a bad AST - be cautious
var x ast.Node
if e := v.Index(a.iter.index); e.IsValid() {
x = e.Interface().(ast.Node)
}
a.iter.step = 1
a.apply(parent, name, &a.iter, x)
a.iter.index += a.iter.step
}
a.iter = saved
}
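// A hedged usage sketch (illustration only, not part of the upstream file): it assumes the
// package is imported as "golang.org/x/tools/go/ast/astutil" and renames every identifier
// named "foo" in a parsed file f to "bar", using Cursor.Replace from the pre callback.
//
//	astutil.Apply(f, func(c *astutil.Cursor) bool {
//		if id, ok := c.Node().(*ast.Ident); ok && id.Name == "foo" {
//			c.Replace(ast.NewIdent("bar"))
//		}
//		return true // keep descending into children
//	}, nil)
//	printer.Fprint(os.Stdout, fset, f) // f and fset come from go/parser, as in the earlier sketch
//
// Returning true from the pre function lets Apply walk the children; passing nil for post
// is allowed, as documented for Apply above.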

vendor/golang.org/x/tools/go/ast/astutil/util.go generated vendored Normal file

@ -0,0 +1,14 @@
package astutil
import "go/ast"
// Unparen returns e with any enclosing parentheses stripped.
func Unparen(e ast.Expr) ast.Expr {
for {
p, ok := e.(*ast.ParenExpr)
if !ok {
return e
}
e = p.X
}
}

vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go generated vendored Normal file

@ -0,0 +1,109 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gcexportdata provides functions for locating, reading, and
// writing export data files containing type information produced by the
// gc compiler. This package supports go1.7 export data format and all
// later versions.
//
// Although it might seem convenient for this package to live alongside
// go/types in the standard library, this would cause version skew
// problems for developer tools that use it, since they must be able to
// consume the outputs of the gc compiler both before and after a Go
// update such as from Go 1.7 to Go 1.8. Because this package lives in
// golang.org/x/tools, sites can update their version of this repo some
// time before the Go 1.8 release and rebuild and redeploy their
// developer tools, which will then be able to consume both Go 1.7 and
// Go 1.8 export data files, so they will work before and after the
// Go update. (See discussion at https://golang.org/issue/15651.)
//
package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
import (
"bufio"
"bytes"
"fmt"
"go/token"
"go/types"
"io"
"io/ioutil"
"golang.org/x/tools/go/internal/gcimporter"
)
// Find returns the name of an object (.o) or archive (.a) file
// containing type information for the specified import path,
// using the workspace layout conventions of go/build.
// If no file was found, an empty filename is returned.
//
// A relative srcDir is interpreted relative to the current working directory.
//
// Find also returns the package's resolved (canonical) import path,
// reflecting the effects of srcDir and vendoring on importPath.
func Find(importPath, srcDir string) (filename, path string) {
return gcimporter.FindPkg(importPath, srcDir)
}
// NewReader returns a reader for the export data section of an object
// (.o) or archive (.a) file read from r. The new reader may provide
// additional trailing data beyond the end of the export data.
func NewReader(r io.Reader) (io.Reader, error) {
buf := bufio.NewReader(r)
_, err := gcimporter.FindExportData(buf)
// If we ever switch to a zip-like archive format with the ToC
// at the end, we can return the correct portion of export data,
// but for now we must return the entire rest of the file.
return buf, err
}
// Read reads export data from in, decodes it, and returns type
// information for the package.
// The package name is specified by path.
// File position information is added to fset.
//
// Read may inspect and add to the imports map to ensure that references
// within the export data to other packages are consistent. The caller
// must ensure that imports[path] does not exist, or exists but is
// incomplete (see types.Package.Complete), and Read inserts the
// resulting package into this map entry.
//
// On return, the state of the reader is undefined.
func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
data, err := ioutil.ReadAll(in)
if err != nil {
return nil, fmt.Errorf("reading export data for %q: %v", path, err)
}
if bytes.HasPrefix(data, []byte("!<arch>")) {
return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
}
// The App Engine Go runtime v1.6 uses the old export data format.
// TODO(adonovan): delete once v1.7 has been around for a while.
if bytes.HasPrefix(data, []byte("package ")) {
return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
}
// The indexed export format starts with an 'i'; the older
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
if len(data) > 0 && data[0] == 'i' {
_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
return pkg, err
}
_, pkg, err := gcimporter.BImportData(fset, imports, data, path)
return pkg, err
}
// Write writes encoded type information for the specified package to out.
// The FileSet provides file position information for named objects.
func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
b, err := gcimporter.BExportData(fset, pkg)
if err != nil {
return err
}
_, err = out.Write(b)
return err
}
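// A hedged usage sketch (illustration only, not part of the upstream file): type-check a
// small package with go/types and serialize its type information with Write. The import
// path "example.com/p", the file name, and src are placeholders.
//
//	fset := token.NewFileSet()
//	f, err := parser.ParseFile(fset, "p.go", src, 0)
//	if err != nil {
//		log.Fatal(err)
//	}
//	conf := types.Config{Importer: importer.Default()}
//	pkg, err := conf.Check("example.com/p", fset, []*ast.File{f}, nil)
//	if err != nil {
//		log.Fatal(err)
//	}
//	var buf bytes.Buffer
//	if err := gcexportdata.Write(&buf, fset, pkg); err != nil {
//		log.Fatal(err)
//	}
//	// buf now holds export data that Read (above) can decode again.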

vendor/golang.org/x/tools/go/gcexportdata/importer.go generated vendored Normal file

@ -0,0 +1,73 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gcexportdata
import (
"fmt"
"go/token"
"go/types"
"os"
)
// NewImporter returns a new instance of the types.Importer interface
// that reads type information from export data files written by gc.
// The Importer also satisfies types.ImporterFrom.
//
// Export data files are located using "go build" workspace conventions
// and the build.Default context.
//
// Use this importer instead of go/importer.For("gc", ...) to avoid the
// version-skew problems described in the documentation of this package,
// or to control the FileSet or access the imports map populated during
// package loading.
//
func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom {
return importer{fset, imports}
}
type importer struct {
fset *token.FileSet
imports map[string]*types.Package
}
func (imp importer) Import(importPath string) (*types.Package, error) {
return imp.ImportFrom(importPath, "", 0)
}
func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) {
filename, path := Find(importPath, srcDir)
if filename == "" {
if importPath == "unsafe" {
// Even for unsafe, call Find first in case
// the package was vendored.
return types.Unsafe, nil
}
return nil, fmt.Errorf("can't find import: %s", importPath)
}
if pkg, ok := imp.imports[path]; ok && pkg.Complete() {
return pkg, nil // cache hit
}
// open file
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer func() {
f.Close()
if err != nil {
// add file name to error
err = fmt.Errorf("reading export data: %s: %v", filename, err)
}
}()
r, err := NewReader(f)
if err != nil {
return nil, err
}
return Read(r, imp.fset, imp.imports, path)
}
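// A hedged usage sketch (illustration only, not part of the upstream file): plug NewImporter
// into types.Config so that the imports of a parsed file are resolved from gc export data
// in the build workspace. The import path "example.com/p" and src are placeholders.
//
//	fset := token.NewFileSet()
//	f, err := parser.ParseFile(fset, "p.go", src, 0)
//	if err != nil {
//		log.Fatal(err)
//	}
//	imports := make(map[string]*types.Package)
//	conf := types.Config{Importer: gcexportdata.NewImporter(fset, imports)}
//	pkg, err := conf.Check("example.com/p", fset, []*ast.File{f}, nil)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(pkg.Name(), "resolved", len(imports), "packages from export data")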

vendor/golang.org/x/tools/go/gcexportdata/main.go generated vendored Normal file

@ -0,0 +1,99 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// The gcexportdata command is a diagnostic tool that displays the
// contents of gc export data files.
package main
import (
"flag"
"fmt"
"go/token"
"go/types"
"log"
"os"
"golang.org/x/tools/go/gcexportdata"
"golang.org/x/tools/go/types/typeutil"
)
var packageFlag = flag.String("package", "", "alternative package to print")
func main() {
log.SetPrefix("gcexportdata: ")
log.SetFlags(0)
flag.Usage = func() {
fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a")
}
flag.Parse()
if flag.NArg() != 1 {
flag.Usage()
os.Exit(2)
}
filename := flag.Args()[0]
f, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
r, err := gcexportdata.NewReader(f)
if err != nil {
log.Fatalf("%s: %s", filename, err)
}
// Decode the package.
const primary = "<primary>"
imports := make(map[string]*types.Package)
fset := token.NewFileSet()
pkg, err := gcexportdata.Read(r, fset, imports, primary)
if err != nil {
log.Fatalf("%s: %s", filename, err)
}
// Optionally select an indirectly mentioned package.
if *packageFlag != "" {
pkg = imports[*packageFlag]
if pkg == nil {
fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n",
filename, *packageFlag)
for p := range imports {
if p != primary {
fmt.Fprintf(os.Stderr, "\t%s\n", p)
}
}
os.Exit(1)
}
}
// Print all package-level declarations, including non-exported ones.
fmt.Printf("package %s\n", pkg.Name())
for _, imp := range pkg.Imports() {
fmt.Printf("import %q\n", imp.Path())
}
qual := func(p *types.Package) string {
if pkg == p {
return ""
}
return p.Name()
}
scope := pkg.Scope()
for _, name := range scope.Names() {
obj := scope.Lookup(name)
fmt.Printf("%s: %s\n",
fset.Position(obj.Pos()),
types.ObjectString(obj, qual))
// For types, print each method.
if _, ok := obj.(*types.TypeName); ok {
for _, method := range typeutil.IntuitiveMethodSet(obj.Type(), nil) {
fmt.Printf("%s: %s\n",
fset.Position(method.Obj().Pos()),
types.SelectionString(method, qual))
}
}
}
}

vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go generated vendored Normal file

@ -0,0 +1,852 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Binary package export.
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
// see that file for specification of the format.
package gcimporter
import (
"bytes"
"encoding/binary"
"fmt"
"go/ast"
"go/constant"
"go/token"
"go/types"
"math"
"math/big"
"sort"
"strings"
)
// If debugFormat is set, each integer and string value is preceded by a marker
// and position information in the encoding. This mechanism permits an importer
// to recognize immediately when it is out of sync. The importer recognizes this
// mode automatically (i.e., it can import export data produced with debugging
// support even if debugFormat is not set at the time of import). This mode will
// lead to massively larger export data (by a factor of 2 to 3) and should only
// be enabled during development and debugging.
//
// NOTE: This flag is the first flag to enable if importing dies because of
// (suspected) format errors, and whenever a change is made to the format.
const debugFormat = false // default: false
// If trace is set, debugging output is printed to std out.
const trace = false // default: false
// Current export format version. Increase with each format change.
// Note: The latest binary (non-indexed) export format is at version 6.
// This exporter is still at level 4, but it doesn't matter since
// the binary importer can handle older versions just fine.
// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
// 4: type name objects support type aliases, uses aliasTag
// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
// 2: removed unused bool in ODCL export (compiler only)
// 1: header format change (more regular), export package for _ struct fields
// 0: Go1.7 encoding
const exportVersion = 4
// trackAllTypes enables cycle tracking for all types, not just named
// types. The existing compiler invariants assume that unnamed types
// that are not completely set up are not used, or else there are spurious
// errors.
// If disabled, only named types are tracked, possibly leading to slightly
// less efficient encoding in rare cases. It also prevents the export of
// some corner-case type declarations (but those are not handled correctly
// with the textual export format either).
// TODO(gri) enable and remove once issues caused by it are fixed
const trackAllTypes = false
type exporter struct {
fset *token.FileSet
out bytes.Buffer
// object -> index maps, indexed in order of serialization
strIndex map[string]int
pkgIndex map[*types.Package]int
typIndex map[types.Type]int
// position encoding
posInfoFormat bool
prevFile string
prevLine int
// debugging support
written int // bytes written
indent int // for trace
}
// internalError represents an error generated inside this package.
type internalError string
func (e internalError) Error() string { return "gcimporter: " + string(e) }
func internalErrorf(format string, args ...interface{}) error {
return internalError(fmt.Sprintf(format, args...))
}
// BExportData returns binary export data for pkg.
// If no file set is provided, position info will be missing.
func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
defer func() {
if e := recover(); e != nil {
if ierr, ok := e.(internalError); ok {
err = ierr
return
}
// Not an internal error; panic again.
panic(e)
}
}()
p := exporter{
fset: fset,
strIndex: map[string]int{"": 0}, // empty string is mapped to 0
pkgIndex: make(map[*types.Package]int),
typIndex: make(map[types.Type]int),
posInfoFormat: true, // TODO(gri) might become a flag, eventually
}
// write version info
// The version string must start with "version %d" where %d is the version
// number. Additional debugging information may follow after a blank; that
// text is ignored by the importer.
p.rawStringln(fmt.Sprintf("version %d", exportVersion))
var debug string
if debugFormat {
debug = "debug"
}
p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
p.bool(trackAllTypes)
p.bool(p.posInfoFormat)
// --- generic export data ---
// populate type map with predeclared "known" types
for index, typ := range predeclared() {
p.typIndex[typ] = index
}
if len(p.typIndex) != len(predeclared()) {
return nil, internalError("duplicate entries in type map?")
}
// write package data
p.pkg(pkg, true)
if trace {
p.tracef("\n")
}
// write objects
objcount := 0
scope := pkg.Scope()
for _, name := range scope.Names() {
if !ast.IsExported(name) {
continue
}
if trace {
p.tracef("\n")
}
p.obj(scope.Lookup(name))
objcount++
}
// indicate end of list
if trace {
p.tracef("\n")
}
p.tag(endTag)
// for self-verification only (redundant)
p.int(objcount)
if trace {
p.tracef("\n")
}
// --- end of export data ---
return p.out.Bytes(), nil
}
func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
if pkg == nil {
panic(internalError("unexpected nil pkg"))
}
// if we saw the package before, write its index (>= 0)
if i, ok := p.pkgIndex[pkg]; ok {
p.index('P', i)
return
}
// otherwise, remember the package, write the package tag (< 0) and package data
if trace {
p.tracef("P%d = { ", len(p.pkgIndex))
defer p.tracef("} ")
}
p.pkgIndex[pkg] = len(p.pkgIndex)
p.tag(packageTag)
p.string(pkg.Name())
if emptypath {
p.string("")
} else {
p.string(pkg.Path())
}
}
func (p *exporter) obj(obj types.Object) {
switch obj := obj.(type) {
case *types.Const:
p.tag(constTag)
p.pos(obj)
p.qualifiedName(obj)
p.typ(obj.Type())
p.value(obj.Val())
case *types.TypeName:
if obj.IsAlias() {
p.tag(aliasTag)
p.pos(obj)
p.qualifiedName(obj)
} else {
p.tag(typeTag)
}
p.typ(obj.Type())
case *types.Var:
p.tag(varTag)
p.pos(obj)
p.qualifiedName(obj)
p.typ(obj.Type())
case *types.Func:
p.tag(funcTag)
p.pos(obj)
p.qualifiedName(obj)
sig := obj.Type().(*types.Signature)
p.paramList(sig.Params(), sig.Variadic())
p.paramList(sig.Results(), false)
default:
panic(internalErrorf("unexpected object %v (%T)", obj, obj))
}
}
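// As a worked example of the delta encoding below: if the previous object was
// written at dir/a.go:10 and the next one is at dir/a.go:12, only the delta 2 is
// emitted. If the next object is instead at dir/b.go:7, the exporter emits 0 (file
// change), then commonPrefixLen("dir/a.go", "dir/b.go") == 4, the suffix "b.go",
// and the line 7. A repeated position in the same file emits delta 0 followed by -1.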
func (p *exporter) pos(obj types.Object) {
if !p.posInfoFormat {
return
}
file, line := p.fileLine(obj)
if file == p.prevFile {
// common case: write line delta
// delta == 0 means different file or no line change
delta := line - p.prevLine
p.int(delta)
if delta == 0 {
p.int(-1) // -1 means no file change
}
} else {
// different file
p.int(0)
// Encode filename as length of common prefix with previous
// filename, followed by (possibly empty) suffix. Filenames
// frequently share path prefixes, so this can save a lot
// of space and make export data size less dependent on file
// path length. The suffix is unlikely to be empty because
// file names tend to end in ".go".
n := commonPrefixLen(p.prevFile, file)
p.int(n) // n >= 0
p.string(file[n:]) // write suffix only
p.prevFile = file
p.int(line)
}
p.prevLine = line
}
func (p *exporter) fileLine(obj types.Object) (file string, line int) {
if p.fset != nil {
pos := p.fset.Position(obj.Pos())
file = pos.Filename
line = pos.Line
}
return
}
func commonPrefixLen(a, b string) int {
if len(a) > len(b) {
a, b = b, a
}
// len(a) <= len(b)
i := 0
for i < len(a) && a[i] == b[i] {
i++
}
return i
}
func (p *exporter) qualifiedName(obj types.Object) {
p.string(obj.Name())
p.pkg(obj.Pkg(), false)
}
func (p *exporter) typ(t types.Type) {
if t == nil {
panic(internalError("nil type"))
}
// Possible optimization: Anonymous pointer types *T where
// T is a named type are common. We could canonicalize all
// such types *T to a single type PT = *T. This would lead
// to at most one *T entry in typIndex, and all future *T's
// would be encoded as the respective index directly. Would
// save 1 byte (pointerTag) per *T and reduce the typIndex
// size (at the cost of a canonicalization map). We can do
// this later, without encoding format change.
// if we saw the type before, write its index (>= 0)
if i, ok := p.typIndex[t]; ok {
p.index('T', i)
return
}
// otherwise, remember the type, write the type tag (< 0) and type data
if trackAllTypes {
if trace {
p.tracef("T%d = {>\n", len(p.typIndex))
defer p.tracef("<\n} ")
}
p.typIndex[t] = len(p.typIndex)
}
switch t := t.(type) {
case *types.Named:
if !trackAllTypes {
// if we don't track all types, track named types now
p.typIndex[t] = len(p.typIndex)
}
p.tag(namedTag)
p.pos(t.Obj())
p.qualifiedName(t.Obj())
p.typ(t.Underlying())
if !types.IsInterface(t) {
p.assocMethods(t)
}
case *types.Array:
p.tag(arrayTag)
p.int64(t.Len())
p.typ(t.Elem())
case *types.Slice:
p.tag(sliceTag)
p.typ(t.Elem())
case *dddSlice:
p.tag(dddTag)
p.typ(t.elem)
case *types.Struct:
p.tag(structTag)
p.fieldList(t)
case *types.Pointer:
p.tag(pointerTag)
p.typ(t.Elem())
case *types.Signature:
p.tag(signatureTag)
p.paramList(t.Params(), t.Variadic())
p.paramList(t.Results(), false)
case *types.Interface:
p.tag(interfaceTag)
p.iface(t)
case *types.Map:
p.tag(mapTag)
p.typ(t.Key())
p.typ(t.Elem())
case *types.Chan:
p.tag(chanTag)
p.int(int(3 - t.Dir())) // hack
p.typ(t.Elem())
default:
panic(internalErrorf("unexpected type %T: %s", t, t))
}
}
func (p *exporter) assocMethods(named *types.Named) {
// Sort methods (for determinism).
var methods []*types.Func
for i := 0; i < named.NumMethods(); i++ {
methods = append(methods, named.Method(i))
}
sort.Sort(methodsByName(methods))
p.int(len(methods))
if trace && methods != nil {
p.tracef("associated methods {>\n")
}
for i, m := range methods {
if trace && i > 0 {
p.tracef("\n")
}
p.pos(m)
name := m.Name()
p.string(name)
if !exported(name) {
p.pkg(m.Pkg(), false)
}
sig := m.Type().(*types.Signature)
p.paramList(types.NewTuple(sig.Recv()), false)
p.paramList(sig.Params(), sig.Variadic())
p.paramList(sig.Results(), false)
p.int(0) // dummy value for go:nointerface pragma - ignored by importer
}
if trace && methods != nil {
p.tracef("<\n} ")
}
}
type methodsByName []*types.Func
func (x methodsByName) Len() int { return len(x) }
func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }
func (p *exporter) fieldList(t *types.Struct) {
if trace && t.NumFields() > 0 {
p.tracef("fields {>\n")
defer p.tracef("<\n} ")
}
p.int(t.NumFields())
for i := 0; i < t.NumFields(); i++ {
if trace && i > 0 {
p.tracef("\n")
}
p.field(t.Field(i))
p.string(t.Tag(i))
}
}
func (p *exporter) field(f *types.Var) {
if !f.IsField() {
panic(internalError("field expected"))
}
p.pos(f)
p.fieldName(f)
p.typ(f.Type())
}
func (p *exporter) iface(t *types.Interface) {
// TODO(gri): enable importer to load embedded interfaces,
// then emit Embeddeds and ExplicitMethods separately here.
p.int(0)
n := t.NumMethods()
if trace && n > 0 {
p.tracef("methods {>\n")
defer p.tracef("<\n} ")
}
p.int(n)
for i := 0; i < n; i++ {
if trace && i > 0 {
p.tracef("\n")
}
p.method(t.Method(i))
}
}
func (p *exporter) method(m *types.Func) {
sig := m.Type().(*types.Signature)
if sig.Recv() == nil {
panic(internalError("method expected"))
}
p.pos(m)
p.string(m.Name())
if m.Name() != "_" && !ast.IsExported(m.Name()) {
p.pkg(m.Pkg(), false)
}
// interface method; no need to encode receiver.
p.paramList(sig.Params(), sig.Variadic())
p.paramList(sig.Results(), false)
}
func (p *exporter) fieldName(f *types.Var) {
name := f.Name()
if f.Anonymous() {
// anonymous field - we distinguish between 3 cases:
// 1) field name matches base type name and is exported
// 2) field name matches base type name and is not exported
// 3) field name doesn't match base type name (alias name)
bname := basetypeName(f.Type())
if name == bname {
if ast.IsExported(name) {
name = "" // 1) we don't need to know the field name or package
} else {
name = "?" // 2) use unexported name "?" to force package export
}
} else {
// 3) indicate alias and export name as is
// (this requires an extra "@" but this is a rare case)
p.string("@")
}
}
p.string(name)
if name != "" && !ast.IsExported(name) {
p.pkg(f.Pkg(), false)
}
}
func basetypeName(typ types.Type) string {
switch typ := deref(typ).(type) {
case *types.Basic:
return typ.Name()
case *types.Named:
return typ.Obj().Name()
default:
return "" // unnamed type
}
}
func (p *exporter) paramList(params *types.Tuple, variadic bool) {
// use negative length to indicate unnamed parameters
// (look at the first parameter only since either all
// names are present or all are absent)
n := params.Len()
if n > 0 && params.At(0).Name() == "" {
n = -n
}
p.int(n)
for i := 0; i < params.Len(); i++ {
q := params.At(i)
t := q.Type()
if variadic && i == params.Len()-1 {
t = &dddSlice{t.(*types.Slice).Elem()}
}
p.typ(t)
if n > 0 {
name := q.Name()
p.string(name)
if name != "_" {
p.pkg(q.Pkg(), false)
}
}
p.string("") // no compiler-specific info
}
}
func (p *exporter) value(x constant.Value) {
if trace {
p.tracef("= ")
}
switch x.Kind() {
case constant.Bool:
tag := falseTag
if constant.BoolVal(x) {
tag = trueTag
}
p.tag(tag)
case constant.Int:
if v, exact := constant.Int64Val(x); exact {
// common case: x fits into an int64 - use compact encoding
p.tag(int64Tag)
p.int64(v)
return
}
// uncommon case: large x - use float encoding
// (powers of 2 will be encoded efficiently with exponent)
p.tag(floatTag)
p.float(constant.ToFloat(x))
case constant.Float:
p.tag(floatTag)
p.float(x)
case constant.Complex:
p.tag(complexTag)
p.float(constant.Real(x))
p.float(constant.Imag(x))
case constant.String:
p.tag(stringTag)
p.string(constant.StringVal(x))
case constant.Unknown:
// package contains type errors
p.tag(unknownTag)
default:
panic(internalErrorf("unexpected value %v (%T)", x, x))
}
}
func (p *exporter) float(x constant.Value) {
if x.Kind() != constant.Float {
panic(internalErrorf("unexpected constant %v, want float", x))
}
// extract sign (there is no -0)
sign := constant.Sign(x)
if sign == 0 {
// x == 0
p.int(0)
return
}
// x != 0
var f big.Float
if v, exact := constant.Float64Val(x); exact {
// float64
f.SetFloat64(v)
} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
// TODO(gri): add big.Rat accessor to constant.Value.
r := valueToRat(num)
f.SetRat(r.Quo(r, valueToRat(denom)))
} else {
// Value too large to represent as a fraction => inaccessible.
// TODO(gri): add big.Float accessor to constant.Value.
f.SetFloat64(math.MaxFloat64) // FIXME
}
// extract exponent such that 0.5 <= m < 1.0
var m big.Float
exp := f.MantExp(&m)
// extract mantissa as *big.Int
// - set exponent large enough so mant satisfies mant.IsInt()
// - get *big.Int from mant
m.SetMantExp(&m, int(m.MinPrec()))
mant, acc := m.Int(nil)
if acc != big.Exact {
panic(internalError("internal error"))
}
p.int(sign)
p.int(exp)
p.string(string(mant.Bytes()))
}
func valueToRat(x constant.Value) *big.Rat {
// Convert little-endian to big-endian.
// I can't believe this is necessary.
bytes := constant.Bytes(x)
for i := 0; i < len(bytes)/2; i++ {
bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
}
return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
}
func (p *exporter) bool(b bool) bool {
if trace {
p.tracef("[")
defer p.tracef("= %v] ", b)
}
x := 0
if b {
x = 1
}
p.int(x)
return b
}
// ----------------------------------------------------------------------------
// Low-level encoders
func (p *exporter) index(marker byte, index int) {
if index < 0 {
panic(internalError("invalid index < 0"))
}
if debugFormat {
p.marker('t')
}
if trace {
p.tracef("%c%d ", marker, index)
}
p.rawInt64(int64(index))
}
func (p *exporter) tag(tag int) {
if tag >= 0 {
panic(internalError("invalid tag >= 0"))
}
if debugFormat {
p.marker('t')
}
if trace {
p.tracef("%s ", tagString[-tag])
}
p.rawInt64(int64(tag))
}
func (p *exporter) int(x int) {
p.int64(int64(x))
}
func (p *exporter) int64(x int64) {
if debugFormat {
p.marker('i')
}
if trace {
p.tracef("%d ", x)
}
p.rawInt64(x)
}
func (p *exporter) string(s string) {
if debugFormat {
p.marker('s')
}
if trace {
p.tracef("%q ", s)
}
// if we saw the string before, write its index (>= 0)
// (the empty string is mapped to 0)
if i, ok := p.strIndex[s]; ok {
p.rawInt64(int64(i))
return
}
// otherwise, remember string and write its negative length and bytes
p.strIndex[s] = len(p.strIndex)
p.rawInt64(-int64(len(s)))
for i := 0; i < len(s); i++ {
p.rawByte(s[i])
}
}
// marker emits a marker byte and position information which makes
// it easy for a reader to detect if it is "out of sync". Used for
// debugFormat format only.
func (p *exporter) marker(m byte) {
p.rawByte(m)
// Enable this for help tracking down the location
// of an incorrect marker when running in debugFormat.
if false && trace {
p.tracef("#%d ", p.written)
}
p.rawInt64(int64(p.written))
}
// rawInt64 should only be used by low-level encoders.
func (p *exporter) rawInt64(x int64) {
var tmp [binary.MaxVarintLen64]byte
n := binary.PutVarint(tmp[:], x)
for i := 0; i < n; i++ {
p.rawByte(tmp[i])
}
}
// rawStringln should only be used to emit the initial version string.
func (p *exporter) rawStringln(s string) {
for i := 0; i < len(s); i++ {
p.rawByte(s[i])
}
p.rawByte('\n')
}
// rawByte is the bottleneck interface to write to p.out.
// rawByte escapes b as follows (any encoding that hides '$' would do):
//
// '$' => '|' 'S'
// '|' => '|' '|'
//
// Necessary so other tools can find the end of the
// export data by searching for "$$".
// rawByte should only be used by low-level encoders.
func (p *exporter) rawByte(b byte) {
switch b {
case '$':
// write '$' as '|' 'S'
b = 'S'
fallthrough
case '|':
// write '|' as '|' '|'
p.out.WriteByte('|')
p.written++
}
p.out.WriteByte(b)
p.written++
}
// tracef is like fmt.Printf but it rewrites the format string
// to take care of indentation.
func (p *exporter) tracef(format string, args ...interface{}) {
if strings.ContainsAny(format, "<>\n") {
var buf bytes.Buffer
for i := 0; i < len(format); i++ {
// no need to deal with runes
ch := format[i]
switch ch {
case '>':
p.indent++
continue
case '<':
p.indent--
continue
}
buf.WriteByte(ch)
if ch == '\n' {
for j := p.indent; j > 0; j-- {
buf.WriteString(". ")
}
}
}
format = buf.String()
}
fmt.Printf(format, args...)
}
// Debugging support.
// (tagString is only used when tracing is enabled)
var tagString = [...]string{
// Packages
-packageTag: "package",
// Types
-namedTag: "named type",
-arrayTag: "array",
-sliceTag: "slice",
-dddTag: "ddd",
-structTag: "struct",
-pointerTag: "pointer",
-signatureTag: "signature",
-interfaceTag: "interface",
-mapTag: "map",
-chanTag: "chan",
// Values
-falseTag: "false",
-trueTag: "true",
-int64Tag: "int64",
-floatTag: "float",
-fractionTag: "fraction",
-complexTag: "complex",
-stringTag: "string",
-unknownTag: "unknown",
// Type aliases
-aliasTag: "alias",
}

File diff suppressed because it is too large

vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go generated vendored Normal file

@ -0,0 +1,93 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
// This file implements FindExportData.
package gcimporter
import (
"bufio"
"fmt"
"io"
"strconv"
"strings"
)
func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
// See $GOROOT/include/ar.h.
hdr := make([]byte, 16+12+6+6+8+10+2)
_, err = io.ReadFull(r, hdr)
if err != nil {
return
}
// leave for debugging
if false {
fmt.Printf("header: %s", hdr)
}
s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
size, err = strconv.Atoi(s)
if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
err = fmt.Errorf("invalid archive header")
return
}
name = strings.TrimSpace(string(hdr[:16]))
return
}
// FindExportData positions the reader r at the beginning of the
// export data section of an underlying GC-created object/archive
// file by reading from it. The reader must be positioned at the
// start of the file before calling this function. The hdr result
// is the string before the export data, either "$$" or "$$B".
//
func FindExportData(r *bufio.Reader) (hdr string, err error) {
// Read first line to make sure this is an object file.
line, err := r.ReadSlice('\n')
if err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
if string(line) == "!<arch>\n" {
// Archive file. Scan to __.PKGDEF.
var name string
if name, _, err = readGopackHeader(r); err != nil {
return
}
// First entry should be __.PKGDEF.
if name != "__.PKGDEF" {
err = fmt.Errorf("go archive is missing __.PKGDEF")
return
}
// Read first line of __.PKGDEF data, so that line
// is once again the first line of the input.
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
}
// Now at __.PKGDEF in archive or still at beginning of file.
// Either way, line should begin with "go object ".
if !strings.HasPrefix(string(line), "go object ") {
err = fmt.Errorf("not a Go object file")
return
}
// Skip over object header to export data.
// Begins after first line starting with $$.
for line[0] != '$' {
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
}
hdr = string(line)
return
}

File diff suppressed because it is too large

vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go generated vendored Normal file

@ -0,0 +1,723 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Indexed binary package export.
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
// see that file for specification of the format.
// +build go1.11
package gcimporter
import (
"bytes"
"encoding/binary"
"go/ast"
"go/constant"
"go/token"
"go/types"
"io"
"math/big"
"reflect"
"sort"
)
// Current indexed export format version. Increase with each format change.
// 0: Go1.11 encoding
const iexportVersion = 0
// IExportData returns the binary export data for pkg.
// If no file set is provided, position info will be missing.
func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
defer func() {
if e := recover(); e != nil {
if ierr, ok := e.(internalError); ok {
err = ierr
return
}
// Not an internal error; panic again.
panic(e)
}
}()
p := iexporter{
out: bytes.NewBuffer(nil),
fset: fset,
allPkgs: map[*types.Package]bool{},
stringIndex: map[string]uint64{},
declIndex: map[types.Object]uint64{},
typIndex: map[types.Type]uint64{},
}
for i, pt := range predeclared() {
p.typIndex[pt] = uint64(i)
}
if len(p.typIndex) > predeclReserved {
panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
}
// Initialize work queue with exported declarations.
scope := pkg.Scope()
for _, name := range scope.Names() {
if ast.IsExported(name) {
p.pushDecl(scope.Lookup(name))
}
}
// Loop until no more work.
for !p.declTodo.empty() {
p.doDecl(p.declTodo.popHead())
}
// Append indices to data0 section.
dataLen := uint64(p.data0.Len())
w := p.newWriter()
w.writeIndex(p.declIndex, pkg)
w.flush()
// Assemble header.
var hdr intWriter
hdr.WriteByte('i')
hdr.uint64(iexportVersion)
hdr.uint64(uint64(p.strings.Len()))
hdr.uint64(dataLen)
// Flush output.
io.Copy(p.out, &hdr)
io.Copy(p.out, &p.strings)
io.Copy(p.out, &p.data0)
return p.out.Bytes(), nil
}
// writeIndex writes out the main object index, which is also read by
// non-compiler tools and includes a complete package description
// (i.e., name and height) for every package referenced by the export data.
func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) {
// Build a map from packages to objects from that package.
pkgObjs := map[*types.Package][]types.Object{}
// For the main index, make sure to include every package that
// we reference, even if we're not exporting (or reexporting)
// any symbols from it.
pkgObjs[localpkg] = nil
for pkg := range w.p.allPkgs {
pkgObjs[pkg] = nil
}
for obj := range index {
pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj)
}
var pkgs []*types.Package
for pkg, objs := range pkgObjs {
pkgs = append(pkgs, pkg)
sort.Slice(objs, func(i, j int) bool {
return objs[i].Name() < objs[j].Name()
})
}
sort.Slice(pkgs, func(i, j int) bool {
return pkgs[i].Path() < pkgs[j].Path()
})
w.uint64(uint64(len(pkgs)))
for _, pkg := range pkgs {
w.string(pkg.Path())
w.string(pkg.Name())
w.uint64(uint64(0)) // package height is not needed for go/types
objs := pkgObjs[pkg]
w.uint64(uint64(len(objs)))
for _, obj := range objs {
w.string(obj.Name())
w.uint64(index[obj])
}
}
}
type iexporter struct {
fset *token.FileSet
out *bytes.Buffer
// allPkgs tracks all packages that have been referenced by
// the export data, so we can ensure to include them in the
// main index.
allPkgs map[*types.Package]bool
declTodo objQueue
strings intWriter
stringIndex map[string]uint64
data0 intWriter
declIndex map[types.Object]uint64
typIndex map[types.Type]uint64
}
// stringOff returns the offset of s within the string section.
// If not already present, it's added to the end.
func (p *iexporter) stringOff(s string) uint64 {
off, ok := p.stringIndex[s]
if !ok {
off = uint64(p.strings.Len())
p.stringIndex[s] = off
p.strings.uint64(uint64(len(s)))
p.strings.WriteString(s)
}
return off
}
// pushDecl adds n to the declaration work queue, if not already present.
func (p *iexporter) pushDecl(obj types.Object) {
// Package unsafe is known to the compiler and predeclared.
assert(obj.Pkg() != types.Unsafe)
if _, ok := p.declIndex[obj]; ok {
return
}
p.declIndex[obj] = ^uint64(0) // mark n present in work queue
p.declTodo.pushTail(obj)
}
// exportWriter handles writing out individual data section chunks.
type exportWriter struct {
p *iexporter
data intWriter
currPkg *types.Package
prevFile string
prevLine int64
}
func (p *iexporter) doDecl(obj types.Object) {
w := p.newWriter()
w.setPkg(obj.Pkg(), false)
switch obj := obj.(type) {
case *types.Var:
w.tag('V')
w.pos(obj.Pos())
w.typ(obj.Type(), obj.Pkg())
case *types.Func:
sig, _ := obj.Type().(*types.Signature)
if sig.Recv() != nil {
panic(internalErrorf("unexpected method: %v", sig))
}
w.tag('F')
w.pos(obj.Pos())
w.signature(sig)
case *types.Const:
w.tag('C')
w.pos(obj.Pos())
w.value(obj.Type(), obj.Val())
case *types.TypeName:
if obj.IsAlias() {
w.tag('A')
w.pos(obj.Pos())
w.typ(obj.Type(), obj.Pkg())
break
}
// Defined type.
w.tag('T')
w.pos(obj.Pos())
underlying := obj.Type().Underlying()
w.typ(underlying, obj.Pkg())
t := obj.Type()
if types.IsInterface(t) {
break
}
named, ok := t.(*types.Named)
if !ok {
panic(internalErrorf("%s is not a defined type", t))
}
n := named.NumMethods()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
m := named.Method(i)
w.pos(m.Pos())
w.string(m.Name())
sig, _ := m.Type().(*types.Signature)
w.param(sig.Recv())
w.signature(sig)
}
default:
panic(internalErrorf("unexpected object: %v", obj))
}
p.declIndex[obj] = w.flush()
}
func (w *exportWriter) tag(tag byte) {
w.data.WriteByte(tag)
}
func (w *exportWriter) pos(pos token.Pos) {
p := w.p.fset.Position(pos)
file := p.Filename
line := int64(p.Line)
// When file is the same as the last position (common case),
// we can save a few bytes by delta encoding just the line
// number.
//
// Note: Because data objects may be read out of order (or not
// at all), we can only apply delta encoding within a single
// object. This is handled implicitly by tracking prevFile and
// prevLine as fields of exportWriter.
if file == w.prevFile {
delta := line - w.prevLine
w.int64(delta)
if delta == deltaNewFile {
w.int64(-1)
}
} else {
w.int64(deltaNewFile)
w.int64(line) // line >= 0
w.string(file)
w.prevFile = file
}
w.prevLine = line
}
func (w *exportWriter) pkg(pkg *types.Package) {
// Ensure any referenced packages are declared in the main index.
w.p.allPkgs[pkg] = true
w.string(pkg.Path())
}
func (w *exportWriter) qualifiedIdent(obj types.Object) {
// Ensure any referenced declarations are written out too.
w.p.pushDecl(obj)
w.string(obj.Name())
w.pkg(obj.Pkg())
}
func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
w.data.uint64(w.p.typOff(t, pkg))
}
func (p *iexporter) newWriter() *exportWriter {
return &exportWriter{p: p}
}
func (w *exportWriter) flush() uint64 {
off := uint64(w.p.data0.Len())
io.Copy(&w.p.data0, &w.data)
return off
}
func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
off, ok := p.typIndex[t]
if !ok {
w := p.newWriter()
w.doTyp(t, pkg)
off = predeclReserved + w.flush()
p.typIndex[t] = off
}
return off
}
func (w *exportWriter) startType(k itag) {
w.data.uint64(uint64(k))
}
func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
switch t := t.(type) {
case *types.Named:
w.startType(definedType)
w.qualifiedIdent(t.Obj())
case *types.Pointer:
w.startType(pointerType)
w.typ(t.Elem(), pkg)
case *types.Slice:
w.startType(sliceType)
w.typ(t.Elem(), pkg)
case *types.Array:
w.startType(arrayType)
w.uint64(uint64(t.Len()))
w.typ(t.Elem(), pkg)
case *types.Chan:
w.startType(chanType)
// 1 RecvOnly; 2 SendOnly; 3 SendRecv
var dir uint64
switch t.Dir() {
case types.RecvOnly:
dir = 1
case types.SendOnly:
dir = 2
case types.SendRecv:
dir = 3
}
w.uint64(dir)
w.typ(t.Elem(), pkg)
case *types.Map:
w.startType(mapType)
w.typ(t.Key(), pkg)
w.typ(t.Elem(), pkg)
case *types.Signature:
w.startType(signatureType)
w.setPkg(pkg, true)
w.signature(t)
case *types.Struct:
w.startType(structType)
w.setPkg(pkg, true)
n := t.NumFields()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
f := t.Field(i)
w.pos(f.Pos())
w.string(f.Name())
w.typ(f.Type(), pkg)
w.bool(f.Embedded())
w.string(t.Tag(i)) // note (or tag)
}
case *types.Interface:
w.startType(interfaceType)
w.setPkg(pkg, true)
n := t.NumEmbeddeds()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
f := t.Embedded(i)
w.pos(f.Obj().Pos())
w.typ(f.Obj().Type(), f.Obj().Pkg())
}
n = t.NumExplicitMethods()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
m := t.ExplicitMethod(i)
w.pos(m.Pos())
w.string(m.Name())
sig, _ := m.Type().(*types.Signature)
w.signature(sig)
}
default:
panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
}
}
func (w *exportWriter) setPkg(pkg *types.Package, write bool) {
if write {
w.pkg(pkg)
}
w.currPkg = pkg
}
func (w *exportWriter) signature(sig *types.Signature) {
w.paramList(sig.Params())
w.paramList(sig.Results())
if sig.Params().Len() > 0 {
w.bool(sig.Variadic())
}
}
func (w *exportWriter) paramList(tup *types.Tuple) {
n := tup.Len()
w.uint64(uint64(n))
for i := 0; i < n; i++ {
w.param(tup.At(i))
}
}
func (w *exportWriter) param(obj types.Object) {
w.pos(obj.Pos())
w.localIdent(obj)
w.typ(obj.Type(), obj.Pkg())
}
func (w *exportWriter) value(typ types.Type, v constant.Value) {
w.typ(typ, nil)
switch v.Kind() {
case constant.Bool:
w.bool(constant.BoolVal(v))
case constant.Int:
var i big.Int
if i64, exact := constant.Int64Val(v); exact {
i.SetInt64(i64)
} else if ui64, exact := constant.Uint64Val(v); exact {
i.SetUint64(ui64)
} else {
i.SetString(v.ExactString(), 10)
}
w.mpint(&i, typ)
case constant.Float:
f := constantToFloat(v)
w.mpfloat(f, typ)
case constant.Complex:
w.mpfloat(constantToFloat(constant.Real(v)), typ)
w.mpfloat(constantToFloat(constant.Imag(v)), typ)
case constant.String:
w.string(constant.StringVal(v))
case constant.Unknown:
// package contains type errors
default:
panic(internalErrorf("unexpected value %v (%T)", v, v))
}
}
// constantToFloat converts a constant.Value with kind constant.Float to a
// big.Float.
func constantToFloat(x constant.Value) *big.Float {
assert(x.Kind() == constant.Float)
// Use the same floating-point precision (512) as cmd/compile
// (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
const mpprec = 512
var f big.Float
f.SetPrec(mpprec)
if v, exact := constant.Float64Val(x); exact {
// float64
f.SetFloat64(v)
} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
// TODO(gri): add big.Rat accessor to constant.Value.
n := valueToRat(num)
d := valueToRat(denom)
f.SetRat(n.Quo(n, d))
} else {
// Value too large to represent as a fraction => inaccessible.
// TODO(gri): add big.Float accessor to constant.Value.
_, ok := f.SetString(x.ExactString())
assert(ok)
}
return &f
}
// mpint exports a multi-precision integer.
//
// For unsigned types, small values are written out as a single
// byte. Larger values are written out as a length-prefixed big-endian
// byte string, where the length prefix is encoded as its complement.
// For example, bytes 0, 1, and 2 directly represent the integer
// values 0, 1, and 2, while bytes 255, 254, and 253 indicate that a 1-,
// 2-, or 3-byte big-endian string follows.
//
// Encoding for signed types uses the same general approach as for
// unsigned types, except small values use zig-zag encoding and the
// bottom bit of length prefix byte for large values is reserved as a
// sign bit.
//
// The exact boundary between small and large encodings varies
// according to the maximum number of bytes needed to encode a value
// of type typ. As a special case, 8-bit types are always encoded as a
// single byte.
//
// TODO(mdempsky): Is this level of complexity really worthwhile?
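//
// As a worked example (assuming typ is int64, so maxBytes = 8 and
// maxSmall = 256 - 2*8 = 240):
//
//	  0 -> ux = 0            -> single byte 0
//	  1 -> ux = 1<<1 = 2     -> single byte 2
//	 -1 -> ux = 1<<1 - 1 = 1 -> single byte 1
//	100 -> ux = 200          -> single byte 200
//	120 -> ux = 240 (not small) -> prefix byte 256-2*1 = 254, then the byte 0x78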
func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
basic, ok := typ.Underlying().(*types.Basic)
if !ok {
panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
}
signed, maxBytes := intSize(basic)
negative := x.Sign() < 0
if !signed && negative {
panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
}
b := x.Bytes()
if len(b) > 0 && b[0] == 0 {
panic(internalErrorf("leading zeros"))
}
if uint(len(b)) > maxBytes {
panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
}
maxSmall := 256 - maxBytes
if signed {
maxSmall = 256 - 2*maxBytes
}
if maxBytes == 1 {
maxSmall = 256
}
// Check if x can use small value encoding.
if len(b) <= 1 {
var ux uint
if len(b) == 1 {
ux = uint(b[0])
}
if signed {
ux <<= 1
if negative {
ux--
}
}
if ux < maxSmall {
w.data.WriteByte(byte(ux))
return
}
}
n := 256 - uint(len(b))
if signed {
n = 256 - 2*uint(len(b))
if negative {
n |= 1
}
}
if n < maxSmall || n >= 256 {
panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
}
w.data.WriteByte(byte(n))
w.data.Write(b)
}
// mpfloat exports a multi-precision floating point number.
//
// The number's value is decomposed into mantissa × 2**exponent, where
// mantissa is an integer. The value is written out as mantissa (as a
// multi-precision integer) and then the exponent, except exponent is
// omitted if mantissa is zero.
func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
if f.IsInf() {
panic("infinite constant")
}
// Break into f = mant × 2**exp, with 0.5 <= mant < 1.
var mant big.Float
exp := int64(f.MantExp(&mant))
// Scale so that mant is an integer.
prec := mant.MinPrec()
mant.SetMantExp(&mant, int(prec))
exp -= int64(prec)
manti, acc := mant.Int(nil)
if acc != big.Exact {
panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
}
w.mpint(manti, typ)
if manti.Sign() != 0 {
w.int64(exp)
}
}
func (w *exportWriter) bool(b bool) bool {
var x uint64
if b {
x = 1
}
w.uint64(x)
return b
}
func (w *exportWriter) int64(x int64) { w.data.int64(x) }
func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
func (w *exportWriter) localIdent(obj types.Object) {
// Anonymous parameters.
if obj == nil {
w.string("")
return
}
name := obj.Name()
if name == "_" {
w.string("_")
return
}
w.string(name)
}
type intWriter struct {
bytes.Buffer
}
func (w *intWriter) int64(x int64) {
var buf [binary.MaxVarintLen64]byte
n := binary.PutVarint(buf[:], x)
w.Write(buf[:n])
}
func (w *intWriter) uint64(x uint64) {
var buf [binary.MaxVarintLen64]byte
n := binary.PutUvarint(buf[:], x)
w.Write(buf[:n])
}
func assert(cond bool) {
if !cond {
panic("internal error: assertion failed")
}
}
// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.
// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
// a ready-to-use empty queue.
type objQueue struct {
ring []types.Object
head, tail int
}
// empty returns true if q contains no Nodes.
func (q *objQueue) empty() bool {
return q.head == q.tail
}
// pushTail appends n to the tail of the queue.
func (q *objQueue) pushTail(obj types.Object) {
if len(q.ring) == 0 {
q.ring = make([]types.Object, 16)
} else if q.head+len(q.ring) == q.tail {
// Grow the ring.
nring := make([]types.Object, len(q.ring)*2)
// Copy the old elements.
part := q.ring[q.head%len(q.ring):]
if q.tail-q.head <= len(part) {
part = part[:q.tail-q.head]
copy(nring, part)
} else {
pos := copy(nring, part)
copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
}
q.ring, q.head, q.tail = nring, 0, q.tail-q.head
}
q.ring[q.tail%len(q.ring)] = obj
q.tail++
}
// popHead pops a node from the head of the queue. It panics if q is empty.
func (q *objQueue) popHead() types.Object {
if q.empty() {
panic("dequeue empty")
}
obj := q.ring[q.head%len(q.ring)]
q.head++
return obj
}

vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go generated vendored Normal file

@ -0,0 +1,606 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Indexed package import.
// See cmd/compile/internal/gc/iexport.go for the export data format.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
package gcimporter
import (
"bytes"
"encoding/binary"
"fmt"
"go/constant"
"go/token"
"go/types"
"io"
"sort"
)
type intReader struct {
*bytes.Reader
path string
}
func (r *intReader) int64() int64 {
i, err := binary.ReadVarint(r.Reader)
if err != nil {
errorf("import %q: read varint error: %v", r.path, err)
}
return i
}
func (r *intReader) uint64() uint64 {
i, err := binary.ReadUvarint(r.Reader)
if err != nil {
errorf("import %q: read varint error: %v", r.path, err)
}
return i
}
const predeclReserved = 32
type itag uint64
const (
// Types
definedType itag = iota
pointerType
sliceType
arrayType
chanType
mapType
signatureType
structType
interfaceType
)
// IImportData imports a package from the serialized package data
// and returns the number of bytes consumed and a reference to the package.
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
const currentVersion = 0
version := -1
defer func() {
if e := recover(); e != nil {
if version > currentVersion {
err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
} else {
err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
}
}
}()
r := &intReader{bytes.NewReader(data), path}
version = int(r.uint64())
switch version {
case currentVersion:
default:
errorf("unknown iexport format version %d", version)
}
sLen := int64(r.uint64())
dLen := int64(r.uint64())
whence, _ := r.Seek(0, io.SeekCurrent)
stringData := data[whence : whence+sLen]
declData := data[whence+sLen : whence+sLen+dLen]
r.Seek(sLen+dLen, io.SeekCurrent)
p := iimporter{
ipath: path,
stringData: stringData,
stringCache: make(map[uint64]string),
pkgCache: make(map[uint64]*types.Package),
declData: declData,
pkgIndex: make(map[*types.Package]map[string]uint64),
typCache: make(map[uint64]types.Type),
fake: fakeFileSet{
fset: fset,
files: make(map[string]*token.File),
},
}
for i, pt := range predeclared() {
p.typCache[uint64(i)] = pt
}
pkgList := make([]*types.Package, r.uint64())
for i := range pkgList {
pkgPathOff := r.uint64()
pkgPath := p.stringAt(pkgPathOff)
pkgName := p.stringAt(r.uint64())
_ = r.uint64() // package height; unused by go/types
if pkgPath == "" {
pkgPath = path
}
pkg := imports[pkgPath]
if pkg == nil {
pkg = types.NewPackage(pkgPath, pkgName)
imports[pkgPath] = pkg
} else if pkg.Name() != pkgName {
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
}
p.pkgCache[pkgPathOff] = pkg
nameIndex := make(map[string]uint64)
for nSyms := r.uint64(); nSyms > 0; nSyms-- {
name := p.stringAt(r.uint64())
nameIndex[name] = r.uint64()
}
p.pkgIndex[pkg] = nameIndex
pkgList[i] = pkg
}
var localpkg *types.Package
for _, pkg := range pkgList {
if pkg.Path() == path {
localpkg = pkg
}
}
names := make([]string, 0, len(p.pkgIndex[localpkg]))
for name := range p.pkgIndex[localpkg] {
names = append(names, name)
}
sort.Strings(names)
for _, name := range names {
p.doDecl(localpkg, name)
}
for _, typ := range p.interfaceList {
typ.Complete()
}
// record all referenced packages as imports
list := append(([]*types.Package)(nil), pkgList[1:]...)
sort.Sort(byPath(list))
localpkg.SetImports(list)
// package was imported completely and without errors
localpkg.MarkComplete()
consumed, _ := r.Seek(0, io.SeekCurrent)
return int(consumed), localpkg, nil
}
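// Editor's sketch (not part of the vendored source): a caller holding the raw
// indexed export data for a package (the payload that follows the export
// header emitted by the compiler) could import it like this; exportData and
// the import path are hypothetical.
//
//	fset := token.NewFileSet()
//	imported := make(map[string]*types.Package)
//	n, pkg, err := IImportData(fset, imported, exportData, "example.com/m/pkg")
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = n // number of bytes of export data consumed
//	fmt.Println(pkg.Path(), pkg.Complete())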
type iimporter struct {
ipath string
stringData []byte
stringCache map[uint64]string
pkgCache map[uint64]*types.Package
declData []byte
pkgIndex map[*types.Package]map[string]uint64
typCache map[uint64]types.Type
fake fakeFileSet
interfaceList []*types.Interface
}
func (p *iimporter) doDecl(pkg *types.Package, name string) {
// See if we've already imported this declaration.
if obj := pkg.Scope().Lookup(name); obj != nil {
return
}
off, ok := p.pkgIndex[pkg][name]
if !ok {
errorf("%v.%v not in index", pkg, name)
}
r := &importReader{p: p, currPkg: pkg}
r.declReader.Reset(p.declData[off:])
r.obj(name)
}
func (p *iimporter) stringAt(off uint64) string {
if s, ok := p.stringCache[off]; ok {
return s
}
slen, n := binary.Uvarint(p.stringData[off:])
if n <= 0 {
errorf("varint failed")
}
spos := off + uint64(n)
s := string(p.stringData[spos : spos+slen])
p.stringCache[off] = s
return s
}
func (p *iimporter) pkgAt(off uint64) *types.Package {
if pkg, ok := p.pkgCache[off]; ok {
return pkg
}
path := p.stringAt(off)
errorf("missing package %q in %q", path, p.ipath)
return nil
}
func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
return t
}
if off < predeclReserved {
errorf("predeclared type missing from cache: %v", off)
}
r := &importReader{p: p}
r.declReader.Reset(p.declData[off-predeclReserved:])
t := r.doType(base)
if base == nil || !isInterface(t) {
p.typCache[off] = t
}
return t
}
type importReader struct {
p *iimporter
declReader bytes.Reader
currPkg *types.Package
prevFile string
prevLine int64
}
func (r *importReader) obj(name string) {
tag := r.byte()
pos := r.pos()
switch tag {
case 'A':
typ := r.typ()
r.declare(types.NewTypeName(pos, r.currPkg, name, typ))
case 'C':
typ, val := r.value()
r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
case 'F':
sig := r.signature(nil)
r.declare(types.NewFunc(pos, r.currPkg, name, sig))
case 'T':
// Types can be recursive. We need to setup a stub
// declaration before recursing.
obj := types.NewTypeName(pos, r.currPkg, name, nil)
named := types.NewNamed(obj, nil, nil)
r.declare(obj)
underlying := r.p.typAt(r.uint64(), named).Underlying()
named.SetUnderlying(underlying)
if !isInterface(underlying) {
for n := r.uint64(); n > 0; n-- {
mpos := r.pos()
mname := r.ident()
recv := r.param()
msig := r.signature(recv)
named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
}
}
case 'V':
typ := r.typ()
r.declare(types.NewVar(pos, r.currPkg, name, typ))
default:
errorf("unexpected tag: %v", tag)
}
}
func (r *importReader) declare(obj types.Object) {
obj.Pkg().Scope().Insert(obj)
}
func (r *importReader) value() (typ types.Type, val constant.Value) {
typ = r.typ()
switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
case types.IsBoolean:
val = constant.MakeBool(r.bool())
case types.IsString:
val = constant.MakeString(r.string())
case types.IsInteger:
val = r.mpint(b)
case types.IsFloat:
val = r.mpfloat(b)
case types.IsComplex:
re := r.mpfloat(b)
im := r.mpfloat(b)
val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
default:
if b.Kind() == types.Invalid {
val = constant.MakeUnknown()
return
}
errorf("unexpected type %v", typ) // panics
panic("unreachable")
}
return
}
func intSize(b *types.Basic) (signed bool, maxBytes uint) {
if (b.Info() & types.IsUntyped) != 0 {
return true, 64
}
switch b.Kind() {
case types.Float32, types.Complex64:
return true, 3
case types.Float64, types.Complex128:
return true, 7
}
signed = (b.Info() & types.IsUnsigned) == 0
switch b.Kind() {
case types.Int8, types.Uint8:
maxBytes = 1
case types.Int16, types.Uint16:
maxBytes = 2
case types.Int32, types.Uint32:
maxBytes = 4
default:
maxBytes = 8
}
return
}
func (r *importReader) mpint(b *types.Basic) constant.Value {
signed, maxBytes := intSize(b)
maxSmall := 256 - maxBytes
if signed {
maxSmall = 256 - 2*maxBytes
}
if maxBytes == 1 {
maxSmall = 256
}
n, _ := r.declReader.ReadByte()
if uint(n) < maxSmall {
v := int64(n)
if signed {
v >>= 1
if n&1 != 0 {
v = ^v
}
}
return constant.MakeInt64(v)
}
v := -n
if signed {
v = -(n &^ 1) >> 1
}
if v < 1 || uint(v) > maxBytes {
errorf("weird decoding: %v, %v => %v", n, signed, v)
}
buf := make([]byte, v)
io.ReadFull(&r.declReader, buf)
// convert to little endian
// TODO(gri) go/constant should have a more direct conversion function
// (e.g., once it supports a big.Float based implementation)
for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
buf[i], buf[j] = buf[j], buf[i]
}
x := constant.MakeFromBytes(buf)
if signed && n&1 != 0 {
x = constant.UnaryOp(token.SUB, x, 0)
}
return x
}
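// Worked example (editor's note, not part of the vendored source): for signed
// kinds, small constants are packed into the single header byte n with the
// sign in the low bit, so the decoding above yields
//
//	n = 0 -> 0    (0>>1 = 0, even)
//	n = 1 -> -1   (1>>1 = 0, odd, ^0 = -1)
//	n = 2 -> 1    (2>>1 = 1, even)
//	n = 3 -> -2   (3>>1 = 1, odd, ^1 = -2)
//
// Header bytes at or above maxSmall instead encode the byte length of a
// big-endian magnitude that follows (again with the sign carried in the low
// bit for signed kinds), which is what the second half of mpint handles.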
func (r *importReader) mpfloat(b *types.Basic) constant.Value {
x := r.mpint(b)
if constant.Sign(x) == 0 {
return x
}
exp := r.int64()
switch {
case exp > 0:
x = constant.Shift(x, token.SHL, uint(exp))
case exp < 0:
d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
x = constant.BinaryOp(x, token.QUO, d)
}
return x
}
func (r *importReader) ident() string {
return r.string()
}
func (r *importReader) qualifiedIdent() (*types.Package, string) {
name := r.string()
pkg := r.pkg()
return pkg, name
}
func (r *importReader) pos() token.Pos {
delta := r.int64()
if delta != deltaNewFile {
r.prevLine += delta
} else if l := r.int64(); l == -1 {
r.prevLine += deltaNewFile
} else {
r.prevFile = r.string()
r.prevLine = l
}
if r.prevFile == "" && r.prevLine == 0 {
return token.NoPos
}
return r.p.fake.pos(r.prevFile, int(r.prevLine))
}
func (r *importReader) typ() types.Type {
return r.p.typAt(r.uint64(), nil)
}
func isInterface(t types.Type) bool {
_, ok := t.(*types.Interface)
return ok
}
func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
func (r *importReader) doType(base *types.Named) types.Type {
switch k := r.kind(); k {
default:
errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
return nil
case definedType:
pkg, name := r.qualifiedIdent()
r.p.doDecl(pkg, name)
return pkg.Scope().Lookup(name).(*types.TypeName).Type()
case pointerType:
return types.NewPointer(r.typ())
case sliceType:
return types.NewSlice(r.typ())
case arrayType:
n := r.uint64()
return types.NewArray(r.typ(), int64(n))
case chanType:
dir := chanDir(int(r.uint64()))
return types.NewChan(dir, r.typ())
case mapType:
return types.NewMap(r.typ(), r.typ())
case signatureType:
r.currPkg = r.pkg()
return r.signature(nil)
case structType:
r.currPkg = r.pkg()
fields := make([]*types.Var, r.uint64())
tags := make([]string, len(fields))
for i := range fields {
fpos := r.pos()
fname := r.ident()
ftyp := r.typ()
emb := r.bool()
tag := r.string()
fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
tags[i] = tag
}
return types.NewStruct(fields, tags)
case interfaceType:
r.currPkg = r.pkg()
embeddeds := make([]types.Type, r.uint64())
for i := range embeddeds {
_ = r.pos()
embeddeds[i] = r.typ()
}
methods := make([]*types.Func, r.uint64())
for i := range methods {
mpos := r.pos()
mname := r.ident()
// TODO(mdempsky): Matches bimport.go, but I
// don't agree with this.
var recv *types.Var
if base != nil {
recv = types.NewVar(token.NoPos, r.currPkg, "", base)
}
msig := r.signature(recv)
methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
}
typ := newInterface(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
}
}
func (r *importReader) kind() itag {
return itag(r.uint64())
}
func (r *importReader) signature(recv *types.Var) *types.Signature {
params := r.paramList()
results := r.paramList()
variadic := params.Len() > 0 && r.bool()
return types.NewSignature(recv, params, results, variadic)
}
func (r *importReader) paramList() *types.Tuple {
xs := make([]*types.Var, r.uint64())
for i := range xs {
xs[i] = r.param()
}
return types.NewTuple(xs...)
}
func (r *importReader) param() *types.Var {
pos := r.pos()
name := r.ident()
typ := r.typ()
return types.NewParam(pos, r.currPkg, name, typ)
}
func (r *importReader) bool() bool {
return r.uint64() != 0
}
func (r *importReader) int64() int64 {
n, err := binary.ReadVarint(&r.declReader)
if err != nil {
errorf("readVarint: %v", err)
}
return n
}
func (r *importReader) uint64() uint64 {
n, err := binary.ReadUvarint(&r.declReader)
if err != nil {
errorf("readUvarint: %v", err)
}
return n
}
func (r *importReader) byte() byte {
x, err := r.declReader.ReadByte()
if err != nil {
errorf("declReader.ReadByte: %v", err)
}
return x
}


@ -0,0 +1,21 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.11

package gcimporter
import "go/types"
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
named := make([]*types.Named, len(embeddeds))
for i, e := range embeddeds {
var ok bool
named[i], ok = e.(*types.Named)
if !ok {
panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
}
}
return types.NewInterface(methods, named)
}


@ -0,0 +1,13 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.11

package gcimporter
import "go/types"
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
return types.NewInterfaceType(methods, embeddeds)
}
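// Editor's sketch (not part of the vendored source): constructing a small
// interface through newInterface; the method name is hypothetical.
//
//	sig := types.NewSignature(nil, nil, nil, false)    // func()
//	m := types.NewFunc(token.NoPos, nil, "Close", sig)
//	iface := newInterface([]*types.Func{m}, nil)
//	iface.Complete() // an interface must be completed before use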


@ -0,0 +1,160 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package packagesdriver fetches type sizes for go/packages and go/analysis.
package packagesdriver
import (
"bytes"
"context"
"encoding/json"
"fmt"
"go/types"
"log"
"os"
"os/exec"
"strings"
"time"
)
var debug = false
// GetSizes returns the sizes used by the underlying driver with the given parameters.
func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
// TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver.
const toolPrefix = "GOPACKAGESDRIVER="
tool := ""
for _, env := range env {
if val := strings.TrimPrefix(env, toolPrefix); val != env {
tool = val
}
}
if tool == "" {
var err error
tool, err = exec.LookPath("gopackagesdriver")
if err != nil {
// We did not find the driver, so use "go list".
tool = "off"
}
}
if tool == "off" {
return GetSizesGolist(ctx, buildFlags, env, dir, usesExportData)
}
req, err := json.Marshal(struct {
Command string `json:"command"`
Env []string `json:"env"`
BuildFlags []string `json:"build_flags"`
}{
Command: "sizes",
Env: env,
BuildFlags: buildFlags,
})
if err != nil {
return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
}
buf := new(bytes.Buffer)
cmd := exec.CommandContext(ctx, tool)
cmd.Dir = dir
cmd.Env = env
cmd.Stdin = bytes.NewReader(req)
cmd.Stdout = buf
cmd.Stderr = new(bytes.Buffer)
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
}
var response struct {
// Sizes, if not nil, is the types.Sizes to use when type checking.
Sizes *types.StdSizes
}
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
return nil, err
}
return response.Sizes, nil
}
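// Editor's sketch (not part of the vendored source): querying sizes for the
// current directory with the host environment. With no GOPACKAGESDRIVER set
// and no gopackagesdriver binary on PATH, GetSizes falls through to
// GetSizesGolist, which shells out to "go list".
//
//	sizes, err := GetSizes(context.Background(), nil, os.Environ(), ".", false)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(sizes.Sizeof(types.Typ[types.Int])) // e.g. 8 on amd64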
func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"}
args = append(args, buildFlags...)
args = append(args, "--", "unsafe")
stdout, err := InvokeGo(ctx, env, dir, usesExportData, args...)
if err != nil {
return nil, err
}
fields := strings.Fields(stdout.String())
if len(fields) < 2 {
return nil, fmt.Errorf("could not determine GOARCH and Go compiler")
}
goarch := fields[0]
compiler := fields[1]
return types.SizesFor(compiler, goarch), nil
}
// InvokeGo returns the stdout of a go command invocation.
func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, error) {
if debug {
defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now())
}
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
cmd := exec.CommandContext(ctx, "go", args...)
// On darwin the cwd gets resolved to the real path, which breaks anything that
// expects the working directory to keep the original path, including the
// go command when dealing with modules.
// The Go stdlib has a special feature where if the cwd and the PWD are the
// same node then it trusts the PWD, so by setting it in the env for the child
// process we fix up all the paths returned by the go command.
cmd.Env = append(append([]string{}, env...), "PWD="+dir)
cmd.Dir = dir
cmd.Stdout = stdout
cmd.Stderr = stderr
if err := cmd.Run(); err != nil {
exitErr, ok := err.(*exec.ExitError)
if !ok {
// Catastrophic error:
// - executable not found
// - context cancellation
return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
}
// Export mode entails a build.
// If that build fails, errors appear on stderr
// (despite the -e flag) and the Export field is blank.
// Do not fail in that case.
if !usesExportData {
return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
}
}
// As of writing, go list -export prints some non-fatal compilation
// errors to stderr, even with -e set. We would prefer that it put
// them in the Package.Error JSON (see https://golang.org/issue/26319).
// In the meantime, there's nowhere good to put them, but they can
// be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
// is set.
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr)
}
// debugging
if false {
fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout)
}
return stdout, nil
}
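// Editor's sketch (not part of the vendored source): InvokeGo is a thin
// wrapper around running "go <args>" in dir, so reading a go env variable
// looks like this.
//
//	out, err := InvokeGo(context.Background(), os.Environ(), ".", false, "env", "GOROOT")
//	if err != nil {
//		log.Fatal(err)
//	}
//	goroot := strings.TrimSpace(out.String())
//	_ = goroot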
func cmdDebugStr(envlist []string, args ...string) string {
env := make(map[string]string)
for _, kv := range envlist {
split := strings.Split(kv, "=")
k, v := split[0], split[1]
env[k] = v
}
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args)
}

222
vendor/golang.org/x/tools/go/packages/doc.go generated vendored Normal file

@ -0,0 +1,222 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package packages loads Go packages for inspection and analysis.
The Load function takes as input a list of patterns and returns a list of Package
structs describing individual packages matched by those patterns.
The LoadMode controls the amount of detail in the loaded packages.
Load passes most patterns directly to the underlying build tool,
but all patterns with the prefix "query=", where query is a
non-empty string of letters from [a-z], are reserved and may be
interpreted as query operators.
Two query operators are currently supported: "file" and "pattern".
The query "file=path/to/file.go" matches the package or packages enclosing
the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
might return the packages "fmt" and "fmt [fmt.test]".
The query "pattern=string" causes "string" to be passed directly to
the underlying build tool. In most cases this is unnecessary,
but an application can use Load("pattern=" + x) as an escaping mechanism
to ensure that x is not interpreted as a query operator if it contains '='.
All other query operators are reserved for future use and currently
cause Load to report an error.
The Package struct provides basic information about the package, including
- ID, a unique identifier for the package in the returned set;
- GoFiles, the names of the package's Go source files;
- Imports, a map from source import strings to the Packages they name;
- Types, the type information for the package's exported symbols;
- Syntax, the parsed syntax trees for the package's source code; and
- TypeInfo, the result of a complete type-check of the package syntax trees.
(See the documentation for type Package for the complete list of fields
and more detailed descriptions.)
For example,
Load(nil, "bytes", "unicode...")
returns four Package structs describing the standard library packages
bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
can match multiple packages and that a package might be matched by
multiple patterns: in general it is not possible to determine which
packages correspond to which patterns.
Note that the list returned by Load contains only the packages matched
by the patterns. Their dependencies can be found by walking the import
graph using the Imports fields.
The Load function can be configured by passing a pointer to a Config as
the first argument. A nil Config is equivalent to the zero Config, which
causes Load to run in LoadFiles mode, collecting minimal information.
See the documentation for type Config for details.
As noted earlier, the Config.Mode controls the amount of detail
reported about the loaded packages, with each mode returning all the data of the
previous mode with some extra added. See the documentation for type LoadMode
for details.
Most tools should pass their command-line arguments (after any flags)
uninterpreted to the loader, so that the loader can interpret them
according to the conventions of the underlying build system.
See the Example function for typical usage.
*/
package packages // import "golang.org/x/tools/go/packages"
/*
Motivation and design considerations
The new package's design solves problems addressed by two existing
packages: go/build, which locates and describes packages, and
golang.org/x/tools/go/loader, which loads, parses and type-checks them.
The go/build.Package structure encodes too much of the 'go build' way
of organizing projects, leaving us in need of a data type that describes a
package of Go source code independent of the underlying build system.
We wanted something that works equally well with go build and vgo, and
also other build systems such as Bazel and Blaze, making it possible to
construct analysis tools that work in all these environments.
Tools such as errcheck and staticcheck were essentially unavailable to
the Go community at Google, and some of Google's internal tools for Go
are unavailable externally.
This new package provides a uniform way to obtain package metadata by
querying each of these build systems, optionally supporting their
preferred command-line notations for packages, so that tools integrate
neatly with users' build environments. The Metadata query function
executes an external query tool appropriate to the current workspace.
Loading packages always returns the complete import graph "all the way down",
even if all you want is information about a single package, because the query
mechanisms of all the build systems we currently support ({go,vgo} list, and
blaze/bazel aspect-based query) cannot provide detailed information
about one package without visiting all its dependencies too, so there is
no additional asymptotic cost to providing transitive information.
(This property might not be true of a hypothetical 5th build system.)
In calls to TypeCheck, all initial packages, and any package that
transitively depends on one of them, must be loaded from source.
Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
source; D may be loaded from export data, and E may not be loaded at all
(though it's possible that D's export data mentions it, so a
types.Package may be created for it and exposed.)
The old loader had a feature to suppress type-checking of function
bodies on a per-package basis, primarily intended to reduce the work of
obtaining type information for imported packages. Now that imports are
satisfied by export data, the optimization no longer seems necessary.
Despite some early attempts, the old loader did not exploit export data,
instead always using the equivalent of WholeProgram mode. This was due
to the complexity of mixing source and export data packages (now
resolved by the upward traversal mentioned above), and because export data
files were nearly always missing or stale. Now that 'go build' supports
caching, all the underlying build systems can guarantee to produce
export data in a reasonable (amortized) time.
Test "main" packages synthesized by the build system are now reported as
first-class packages, avoiding the need for clients (such as go/ssa) to
reinvent this generation logic.
One way in which go/packages is simpler than the old loader is in its
treatment of in-package tests. In-package tests are packages that
consist of all the files of the library under test, plus the test files.
The old loader constructed in-package tests by a two-phase process of
mutation called "augmentation": first it would construct and type check
all the ordinary library packages and type-check the packages that
depend on them; then it would add more (test) files to the package and
type-check again. This two-phase approach had four major problems:
1) in processing the tests, the loader modified the library package,
leaving no way for a client application to see both the test
package and the library package; one would mutate into the other.
2) because test files can declare additional methods on types defined in
the library portion of the package, the dispatch of method calls in
the library portion was affected by the presence of the test files.
This should have been a clue that the packages were logically
different.
3) this model of "augmentation" assumed at most one in-package test
per library package, which is true of projects using 'go build',
but not other build systems.
4) because of the two-phase nature of test processing, all packages that
import the library package had to be processed before augmentation,
forcing a "one-shot" API and preventing the client from calling Load
several times in sequence as is now possible in WholeProgram mode.
(TypeCheck mode has a similar one-shot restriction for a different reason.)
Early drafts of this package supported "multi-shot" operation.
Although it allowed clients to make a sequence of calls (or concurrent
calls) to Load, building up the graph of Packages incrementally,
it was of marginal value: it complicated the API
(since it allowed some options to vary across calls but not others),
it complicated the implementation,
it cannot be made to work in Types mode, as explained above,
and it was less efficient than making one combined call (when this is possible).
Among the clients we have inspected, none made multiple calls to load
but could not be easily and satisfactorily modified to make only a single call.
However, application changes may be required.
For example, the ssadump command loads the user-specified packages
and in addition the runtime package. It is tempting to simply append
"runtime" to the user-provided list, but that does not work if the user
specified an ad-hoc package such as [a.go b.go].
Instead, ssadump no longer requests the runtime package,
but seeks it among the dependencies of the user-specified packages,
and emits an error if it is not found.
Overlays: The Overlay field in the Config allows providing alternate contents
for Go source files, by providing a mapping from file path to contents.
go/packages will pull in new imports added in overlay files when go/packages
is run in LoadImports mode or greater.
Overlay support for the go list driver isn't complete yet: if the file doesn't
exist on disk, it will only be recognized in an overlay if it is a non-test file
and the package would be reported even without the overlay.
Questions & Tasks
- Add GOARCH/GOOS?
They are not portable concepts, but could be made portable.
Our goal has been to allow users to express themselves using the conventions
of the underlying build system: if the build system honors GOARCH
during a build and during a metadata query, then so should
applications built atop that query mechanism.
Conversely, if the target architecture of the build is determined by
command-line flags, the application can pass the relevant
flags through to the build system using a command such as:
myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
However, this approach is low-level, unwieldy, and non-portable.
GOOS and GOARCH seem important enough to warrant a dedicated option.
- How should we handle partial failures such as a mixture of good and
malformed patterns, existing and non-existent packages, successful and
failed builds, import failures, import cycles, and so on, in a call to
Load?
- Support bazel, blaze, and go1.10 list, not just go1.11 list.
- Handle (and test) various partial success cases, e.g.
a mixture of good packages and:
invalid patterns
nonexistent packages
empty packages
packages with malformed package or import declarations
unreadable files
import cycles
other parse errors
type errors
Make sure we record errors at the correct place in the graph.
- Missing packages among initial arguments are not reported.
Return bogus packages for them, like golist does.
- "undeclared name" errors (for example) are reported out of source file
order. I suspect this is due to the breadth-first resolution now used
by go/types. Is that a bug? Discuss with gri.
*/
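// Editor's sketch (not part of the vendored source): minimal client use of the
// Load API described above, from a separate program. The mode constant used is
// one referenced elsewhere in this vendored snapshot.
//
//	cfg := &packages.Config{Mode: packages.NeedImports}
//	pkgs, err := packages.Load(cfg, "bytes", "unicode...")
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, p := range pkgs {
//		fmt.Println(p.ID, p.Name)
//	}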

79
vendor/golang.org/x/tools/go/packages/external.go generated vendored Normal file

@ -0,0 +1,79 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file enables an external tool to intercept package requests.
// If the tool is present then its results are used in preference to
// the go list command.
package packages
import (
"bytes"
"encoding/json"
"fmt"
"os/exec"
"strings"
)
// Driver
type driverRequest struct {
Command string `json:"command"`
Mode LoadMode `json:"mode"`
Env []string `json:"env"`
BuildFlags []string `json:"build_flags"`
Tests bool `json:"tests"`
Overlay map[string][]byte `json:"overlay"`
}
// findExternalDriver returns a driver that invokes an external tool to supply
// the build system package structure, or nil if no such tool is found.
// If GOPACKAGESDRIVER is set in the environment, findExternalDriver uses its
// value; otherwise it searches for a binary named gopackagesdriver on the PATH.
func findExternalDriver(cfg *Config) driver {
const toolPrefix = "GOPACKAGESDRIVER="
tool := ""
for _, env := range cfg.Env {
if val := strings.TrimPrefix(env, toolPrefix); val != env {
tool = val
}
}
if tool != "" && tool == "off" {
return nil
}
if tool == "" {
var err error
tool, err = exec.LookPath("gopackagesdriver")
if err != nil {
return nil
}
}
return func(cfg *Config, words ...string) (*driverResponse, error) {
req, err := json.Marshal(driverRequest{
Mode: cfg.Mode,
Env: cfg.Env,
BuildFlags: cfg.BuildFlags,
Tests: cfg.Tests,
Overlay: cfg.Overlay,
})
if err != nil {
return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
}
buf := new(bytes.Buffer)
cmd := exec.CommandContext(cfg.Context, tool, words...)
cmd.Dir = cfg.Dir
cmd.Env = cfg.Env
cmd.Stdin = bytes.NewReader(req)
cmd.Stdout = buf
cmd.Stderr = new(bytes.Buffer)
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
}
var response driverResponse
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
return nil, err
}
return &response, nil
}
}
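// Editor's note (not part of the vendored source): the request written to the
// tool's stdin is just the JSON encoding of driverRequest, for example
// (values illustrative):
//
//	{"command":"","mode":0,"env":["GOPATH=/home/user/go"],"build_flags":null,"tests":false,"overlay":null}
//
// and the tool must reply on stdout with a JSON-encoded driverResponse, which
// is decoded just above.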

821
vendor/golang.org/x/tools/go/packages/golist.go generated vendored Normal file

@ -0,0 +1,821 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packages
import (
"bytes"
"encoding/json"
"fmt"
"go/types"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/tools/go/internal/packagesdriver"
"golang.org/x/tools/internal/gopathwalk"
"golang.org/x/tools/internal/semver"
)
// debug controls verbose logging.
var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG"))
// A goTooOldError reports that the go command
// found by exec.LookPath is too old to use the new go list behavior.
type goTooOldError struct {
error
}
// responseDeduper wraps a driverResponse, deduplicating its contents.
type responseDeduper struct {
seenRoots map[string]bool
seenPackages map[string]*Package
dr *driverResponse
}
// init fills in r with a driverResponse.
func (r *responseDeduper) init(dr *driverResponse) {
r.dr = dr
r.seenRoots = map[string]bool{}
r.seenPackages = map[string]*Package{}
for _, pkg := range dr.Packages {
r.seenPackages[pkg.ID] = pkg
}
for _, root := range dr.Roots {
r.seenRoots[root] = true
}
}
func (r *responseDeduper) addPackage(p *Package) {
if r.seenPackages[p.ID] != nil {
return
}
r.seenPackages[p.ID] = p
r.dr.Packages = append(r.dr.Packages, p)
}
func (r *responseDeduper) addRoot(id string) {
if r.seenRoots[id] {
return
}
r.seenRoots[id] = true
r.dr.Roots = append(r.dr.Roots, id)
}
// goListDriver uses the go list command to interpret the patterns and produce
// the build system package structure.
// See driver for more details.
func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
var sizes types.Sizes
var sizeserr error
var sizeswg sync.WaitGroup
if cfg.Mode&NeedTypesSizes != 0 {
sizeswg.Add(1)
go func() {
sizes, sizeserr = getSizes(cfg)
sizeswg.Done()
}()
}
// Determine files requested in contains patterns
var containFiles []string
var packagesNamed []string
restPatterns := make([]string, 0, len(patterns))
// Extract file= and other [querytype]= patterns. Report an error if querytype
// doesn't exist.
extractQueries:
for _, pattern := range patterns {
eqidx := strings.Index(pattern, "=")
if eqidx < 0 {
restPatterns = append(restPatterns, pattern)
} else {
query, value := pattern[:eqidx], pattern[eqidx+len("="):]
switch query {
case "file":
containFiles = append(containFiles, value)
case "pattern":
restPatterns = append(restPatterns, value)
case "iamashamedtousethedisabledqueryname":
packagesNamed = append(packagesNamed, value)
case "": // not a reserved query
restPatterns = append(restPatterns, pattern)
default:
for _, rune := range query {
if rune < 'a' || rune > 'z' { // not a reserved query
restPatterns = append(restPatterns, pattern)
continue extractQueries
}
}
// Reject all other patterns containing "="
return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern)
}
}
}
response := &responseDeduper{}
var err error
// See if we have any patterns to pass through to go list. Zero initial
// patterns also requires a go list call, since it's the equivalent of
// ".".
if len(restPatterns) > 0 || len(patterns) == 0 {
dr, err := golistDriverCurrent(cfg, restPatterns...)
if err != nil {
return nil, err
}
response.init(dr)
} else {
response.init(&driverResponse{})
}
sizeswg.Wait()
if sizeserr != nil {
return nil, sizeserr
}
// types.SizesFor always returns nil or a *types.StdSizes
response.dr.Sizes, _ = sizes.(*types.StdSizes)
var containsCandidates []string
if len(containFiles) != 0 {
if err := runContainsQueries(cfg, golistDriverCurrent, response, containFiles); err != nil {
return nil, err
}
}
if len(packagesNamed) != 0 {
if err := runNamedQueries(cfg, golistDriverCurrent, response, packagesNamed); err != nil {
return nil, err
}
}
modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response.dr)
if err != nil {
return nil, err
}
if len(containFiles) > 0 {
containsCandidates = append(containsCandidates, modifiedPkgs...)
containsCandidates = append(containsCandidates, needPkgs...)
}
if len(needPkgs) > 0 {
if err := addNeededOverlayPackages(cfg, golistDriverCurrent, response, needPkgs); err != nil {
return nil, err
}
}
// Check candidate packages for containFiles.
if len(containFiles) > 0 {
for _, id := range containsCandidates {
pkg := response.seenPackages[id]
for _, f := range containFiles {
for _, g := range pkg.GoFiles {
if sameFile(f, g) {
response.addRoot(id)
}
}
}
}
}
return response.dr, nil
}
func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string) error {
dr, err := driver(cfg, pkgs...)
if err != nil {
return err
}
for _, pkg := range dr.Packages {
response.addPackage(pkg)
}
return nil
}
func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error {
for _, query := range queries {
// TODO(matloob): Do only one query per directory.
fdir := filepath.Dir(query)
// Pass absolute path of directory to go list so that it knows to treat it as a directory,
// not a package path.
pattern, err := filepath.Abs(fdir)
if err != nil {
return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
}
dirResponse, err := driver(cfg, pattern)
if err != nil {
return err
}
isRoot := make(map[string]bool, len(dirResponse.Roots))
for _, root := range dirResponse.Roots {
isRoot[root] = true
}
for _, pkg := range dirResponse.Packages {
// Add any new packages to the main set
// We don't bother to filter packages that will be dropped by the changes of roots,
// that will happen anyway during graph construction outside this function.
// Over-reporting packages is not a problem.
response.addPackage(pkg)
// if the package was not a root one, it cannot have the file
if !isRoot[pkg.ID] {
continue
}
for _, pkgFile := range pkg.GoFiles {
if filepath.Base(query) == filepath.Base(pkgFile) {
response.addRoot(pkg.ID)
break
}
}
}
}
return nil
}
// modCacheRegexp splits a path in a module cache into module, module version, and package.
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error {
// calling `go env` isn't free; bail out if there's nothing to do.
if len(queries) == 0 {
return nil
}
// Determine which directories are relevant to scan.
roots, modRoot, err := roots(cfg)
if err != nil {
return err
}
// Scan the selected directories. Simple matches, from GOPATH/GOROOT
// or the local module, can simply be "go list"ed. Matches from the
// module cache need special treatment.
var matchesMu sync.Mutex
var simpleMatches, modCacheMatches []string
add := func(root gopathwalk.Root, dir string) {
// Walk calls this concurrently; protect the result slices.
matchesMu.Lock()
defer matchesMu.Unlock()
path := dir
if dir != root.Path {
path = dir[len(root.Path)+1:]
}
if pathMatchesQueries(path, queries) {
switch root.Type {
case gopathwalk.RootModuleCache:
modCacheMatches = append(modCacheMatches, path)
case gopathwalk.RootCurrentModule:
// We'd need to read go.mod to find the full
// import path. Relative's easier.
rel, err := filepath.Rel(cfg.Dir, dir)
if err != nil {
// This ought to be impossible, since
// we found dir in the current module.
panic(err)
}
simpleMatches = append(simpleMatches, "./"+rel)
case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT:
simpleMatches = append(simpleMatches, path)
}
}
}
startWalk := time.Now()
gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug})
if debug {
log.Printf("%v for walk", time.Since(startWalk))
}
// Weird special case: the top-level package in a module will be in
// whatever directory the user checked the repository out into. It's
// more reasonable for that to not match the package name. So, if there
// are any Go files in the mod root, query it just to be safe.
if modRoot != "" {
rel, err := filepath.Rel(cfg.Dir, modRoot)
if err != nil {
panic(err) // See above.
}
files, err := ioutil.ReadDir(modRoot)
if err != nil {
return err
}
for _, f := range files {
if strings.HasSuffix(f.Name(), ".go") {
simpleMatches = append(simpleMatches, rel)
break
}
}
}
addResponse := func(r *driverResponse) {
for _, pkg := range r.Packages {
response.addPackage(pkg)
for _, name := range queries {
if pkg.Name == name {
response.addRoot(pkg.ID)
break
}
}
}
}
if len(simpleMatches) != 0 {
resp, err := driver(cfg, simpleMatches...)
if err != nil {
return err
}
addResponse(resp)
}
// Module cache matches are tricky. We want to avoid downloading new
// versions of things, so we need to use the ones present in the cache.
// go list doesn't accept version specifiers, so we have to write out a
// temporary module, and do the list in that module.
if len(modCacheMatches) != 0 {
// Collect all the matches, deduplicating by major version
// and preferring the newest.
type modInfo struct {
mod string
major string
}
mods := make(map[modInfo]string)
var imports []string
for _, modPath := range modCacheMatches {
matches := modCacheRegexp.FindStringSubmatch(modPath)
mod, ver := filepath.ToSlash(matches[1]), matches[2]
importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3]))
major := semver.Major(ver)
if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 {
mods[modInfo{mod, major}] = ver
}
imports = append(imports, importPath)
}
// Build the temporary module.
var gomod bytes.Buffer
gomod.WriteString("module modquery\nrequire (\n")
for mod, version := range mods {
gomod.WriteString("\t" + mod.mod + " " + version + "\n")
}
gomod.WriteString(")\n")
tmpCfg := *cfg
// We're only trying to look at stuff in the module cache, so
// disable the network. This should speed things up, and has
// prevented errors in at least one case, #28518.
tmpCfg.Env = append([]string{"GOPROXY=off"}, cfg.Env...)
var err error
tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery")
if err != nil {
return err
}
defer os.RemoveAll(tmpCfg.Dir)
if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil {
return fmt.Errorf("writing go.mod for module cache query: %v", err)
}
// Run the query, using the import paths calculated from the matches above.
resp, err := driver(&tmpCfg, imports...)
if err != nil {
return fmt.Errorf("querying module cache matches: %v", err)
}
addResponse(resp)
}
return nil
}
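// Editor's sketch (not part of the vendored source): for a module cache match
// such as "example.com/mod@v1.2.3/sub/pkg" (path relative to GOPATH/pkg/mod,
// module path hypothetical), the throwaway module written above looks like
//
//	module modquery
//	require (
//		example.com/mod v1.2.3
//	)
//
// and the driver is then invoked in that temporary directory, with GOPROXY=off,
// for the import path example.com/mod/sub/pkg.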
func getSizes(cfg *Config) (types.Sizes, error) {
return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg))
}
// roots selects the appropriate paths to walk based on the passed-in configuration,
// particularly the environment and the presence of a go.mod in cfg.Dir's parents.
func roots(cfg *Config) ([]gopathwalk.Root, string, error) {
stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD")
if err != nil {
return nil, "", err
}
fields := strings.Split(stdout.String(), "\n")
if len(fields) != 4 || len(fields[3]) != 0 {
return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String())
}
goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2]
var modDir string
if gomod != "" {
modDir = filepath.Dir(gomod)
}
var roots []gopathwalk.Root
// Always add GOROOT.
roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "/src"), gopathwalk.RootGOROOT})
// If modules are enabled, scan the module dir.
if modDir != "" {
roots = append(roots, gopathwalk.Root{modDir, gopathwalk.RootCurrentModule})
}
// Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode.
for _, p := range gopath {
if modDir != "" {
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
} else {
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/src"), gopathwalk.RootGOPATH})
}
}
return roots, modDir, nil
}
// These functions were copied from goimports. See further documentation there.
// pathMatchesQueries is adapted from pkgIsCandidate.
// TODO: is it reasonable to do Contains here, rather than an exact match on a path component?
func pathMatchesQueries(path string, queries []string) bool {
lastTwo := lastTwoComponents(path)
for _, query := range queries {
if strings.Contains(lastTwo, query) {
return true
}
if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) {
lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
if strings.Contains(lastTwo, query) {
return true
}
}
}
return false
}
// lastTwoComponents returns at most the last two path components
// of v, using either / or \ as the path separator.
func lastTwoComponents(v string) string {
nslash := 0
for i := len(v) - 1; i >= 0; i-- {
if v[i] == '/' || v[i] == '\\' {
nslash++
if nslash == 2 {
return v[i:]
}
}
}
return v
}
func hasHyphenOrUpperASCII(s string) bool {
for i := 0; i < len(s); i++ {
b := s[i]
if b == '-' || ('A' <= b && b <= 'Z') {
return true
}
}
return false
}
func lowerASCIIAndRemoveHyphen(s string) (ret string) {
buf := make([]byte, 0, len(s))
for i := 0; i < len(s); i++ {
b := s[i]
switch {
case b == '-':
continue
case 'A' <= b && b <= 'Z':
buf = append(buf, b+('a'-'A'))
default:
buf = append(buf, b)
}
}
return string(buf)
}
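// Worked examples (editor's note, not part of the vendored source):
//
//	lastTwoComponents("golang.org/x/tools/gopls") == "/tools/gopls"
//	hasHyphenOrUpperASCII("go-cmp")               == true
//	lowerASCIIAndRemoveHyphen("Go-Cmp")           == "gocmp"
//
// so a query "gocmp" still matches a path ending in "go-cmp" via the
// lower-cased, hyphen-stripped form compared in pathMatchesQueries.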
// Fields must match go list;
// see $GOROOT/src/cmd/go/internal/load/pkg.go.
type jsonPackage struct {
ImportPath string
Dir string
Name string
Export string
GoFiles []string
CompiledGoFiles []string
CFiles []string
CgoFiles []string
CXXFiles []string
MFiles []string
HFiles []string
FFiles []string
SFiles []string
SwigFiles []string
SwigCXXFiles []string
SysoFiles []string
Imports []string
ImportMap map[string]string
Deps []string
TestGoFiles []string
TestImports []string
XTestGoFiles []string
XTestImports []string
ForTest string // q in a "p [q.test]" package, else ""
DepOnly bool
Error *jsonPackageError
}
type jsonPackageError struct {
ImportStack []string
Pos string
Err string
}
func otherFiles(p *jsonPackage) [][]string {
return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
}
// golistDriverCurrent uses the "go list" command to expand the
// pattern words and return metadata for the specified packages.
// dir may be "" and env may be nil, as per os/exec.Command.
func golistDriverCurrent(cfg *Config, words ...string) (*driverResponse, error) {
// go list uses the following identifiers in ImportPath and Imports:
//
// "p" -- importable package or main (command)
// "q.test" -- q's test executable
// "p [q.test]" -- variant of p as built for q's test executable
// "q_test [q.test]" -- q's external test package
//
// The packages p that are built differently for a test q.test
// are q itself, plus any helpers used by the external test q_test,
// typically including "testing" and all its dependencies.
// Run "go list" for complete
// information on the specified packages.
buf, err := invokeGo(cfg, golistargs(cfg, words)...)
if err != nil {
return nil, err
}
seen := make(map[string]*jsonPackage)
// Decode the JSON and convert it to Package form.
var response driverResponse
for dec := json.NewDecoder(buf); dec.More(); {
p := new(jsonPackage)
if err := dec.Decode(p); err != nil {
return nil, fmt.Errorf("JSON decoding failed: %v", err)
}
if p.ImportPath == "" {
// The documentation for go list says that “[e]rroneous packages will have
// a non-empty ImportPath”. If for some reason it comes back empty, we
// prefer to error out rather than silently discarding data or handing
// back a package without any way to refer to it.
if p.Error != nil {
return nil, Error{
Pos: p.Error.Pos,
Msg: p.Error.Err,
}
}
return nil, fmt.Errorf("package missing import path: %+v", p)
}
if old, found := seen[p.ImportPath]; found {
if !reflect.DeepEqual(p, old) {
return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
}
// skip the duplicate
continue
}
seen[p.ImportPath] = p
pkg := &Package{
Name: p.Name,
ID: p.ImportPath,
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
OtherFiles: absJoin(p.Dir, otherFiles(p)...),
}
// Work around https://golang.org/issue/28749:
// cmd/go puts assembly, C, and C++ files in CompiledGoFiles.
// Filter out any elements of CompiledGoFiles that are also in OtherFiles.
// We have to keep this workaround in place until go1.12 is a distant memory.
if len(pkg.OtherFiles) > 0 {
other := make(map[string]bool, len(pkg.OtherFiles))
for _, f := range pkg.OtherFiles {
other[f] = true
}
out := pkg.CompiledGoFiles[:0]
for _, f := range pkg.CompiledGoFiles {
if other[f] {
continue
}
out = append(out, f)
}
pkg.CompiledGoFiles = out
}
// Extract the PkgPath from the package's ID.
if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
pkg.PkgPath = pkg.ID[:i]
} else {
pkg.PkgPath = pkg.ID
}
if pkg.PkgPath == "unsafe" {
pkg.GoFiles = nil // ignore fake unsafe.go file
}
// Assume go list emits only absolute paths for Dir.
if p.Dir != "" && !filepath.IsAbs(p.Dir) {
log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
}
if p.Export != "" && !filepath.IsAbs(p.Export) {
pkg.ExportFile = filepath.Join(p.Dir, p.Export)
} else {
pkg.ExportFile = p.Export
}
// imports
//
// Imports contains the IDs of all imported packages.
// ImportMap records (path, ID) only where they differ.
ids := make(map[string]bool)
for _, id := range p.Imports {
ids[id] = true
}
pkg.Imports = make(map[string]*Package)
for path, id := range p.ImportMap {
pkg.Imports[path] = &Package{ID: id} // non-identity import
delete(ids, id)
}
for id := range ids {
if id == "C" {
continue
}
pkg.Imports[id] = &Package{ID: id} // identity import
}
if !p.DepOnly {
response.Roots = append(response.Roots, pkg.ID)
}
// Work around for pre-go.1.11 versions of go list.
// TODO(matloob): they should be handled by the fallback.
// Can we delete this?
if len(pkg.CompiledGoFiles) == 0 {
pkg.CompiledGoFiles = pkg.GoFiles
}
if p.Error != nil {
pkg.Errors = append(pkg.Errors, Error{
Pos: p.Error.Pos,
Msg: p.Error.Err,
})
}
response.Packages = append(response.Packages, pkg)
}
return &response, nil
}
// absJoin absolutizes and flattens the lists of files.
func absJoin(dir string, fileses ...[]string) (res []string) {
for _, files := range fileses {
for _, file := range files {
if !filepath.IsAbs(file) {
file = filepath.Join(dir, file)
}
res = append(res, file)
}
}
return res
}
func golistargs(cfg *Config, words []string) []string {
const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
fullargs := []string{
"list", "-e", "-json",
fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0),
fmt.Sprintf("-test=%t", cfg.Tests),
fmt.Sprintf("-export=%t", usesExportData(cfg)),
fmt.Sprintf("-deps=%t", cfg.Mode&NeedDeps != 0),
// go list doesn't let you pass -test and -find together,
// probably because you'd just get the TestMain.
fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0),
}
fullargs = append(fullargs, cfg.BuildFlags...)
fullargs = append(fullargs, "--")
fullargs = append(fullargs, words...)
return fullargs
}
// invokeGo returns the stdout of a go command invocation.
func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
cmd := exec.CommandContext(cfg.Context, "go", args...)
// On darwin the cwd gets resolved to the real path, which breaks anything that
// expects the working directory to keep the original path, including the
// go command when dealing with modules.
// The Go stdlib has a special feature where if the cwd and the PWD are the
// same node then it trusts the PWD, so by setting it in the env for the child
// process we fix up all the paths returned by the go command.
cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir)
cmd.Dir = cfg.Dir
cmd.Stdout = stdout
cmd.Stderr = stderr
if debug {
defer func(start time.Time) {
log.Printf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr)
}(time.Now())
}
if err := cmd.Run(); err != nil {
// Check for 'go' executable not being found.
if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound)
}
exitErr, ok := err.(*exec.ExitError)
if !ok {
// Catastrophic error:
// - context cancellation
return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
}
// Old go version?
if strings.Contains(stderr.String(), "flag provided but not defined") {
return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
}
// This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show
// the error in the Err section of stdout in case -e option is provided.
// This fix is provided for backwards compatibility.
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") {
output := fmt.Sprintf(`{"ImportPath": "","Incomplete": true,"Error": {"Pos": "","Err": %s}}`,
strconv.Quote(strings.Trim(stderr.String(), "\n")))
return bytes.NewBufferString(output), nil
}
// Export mode entails a build.
// If that build fails, errors appear on stderr
// (despite the -e flag) and the Export field is blank.
// Do not fail in that case.
// The same is true if an ad-hoc package given to go list doesn't exist.
// TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when
// packages don't exist or a build fails.
if !usesExportData(cfg) && !containsGoFile(args) {
return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
}
}
// As of writing, go list -export prints some non-fatal compilation
// errors to stderr, even with -e set. We would prefer that it put
// them in the Package.Error JSON (see https://golang.org/issue/26319).
// In the meantime, there's nowhere good to put them, but they can
// be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
// is set.
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr)
}
// debugging
if false {
fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cmd, args...), stdout)
}
return stdout, nil
}
func containsGoFile(s []string) bool {
for _, f := range s {
if strings.HasSuffix(f, ".go") {
return true
}
}
return false
}
func cmdDebugStr(cmd *exec.Cmd, args ...string) string {
env := make(map[string]string)
for _, kv := range cmd.Env {
split := strings.Split(kv, "=")
k, v := split[0], split[1]
env[k] = v
}
var quotedArgs []string
for _, arg := range args {
quotedArgs = append(quotedArgs, strconv.Quote(arg))
}
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " "))
}

104
vendor/golang.org/x/tools/go/packages/golist_overlay.go generated vendored Normal file

@ -0,0 +1,104 @@
package packages
import (
"go/parser"
"go/token"
"path/filepath"
"strconv"
"strings"
)
// processGolistOverlay provides rudimentary support for adding
// files that don't exist on disk to an overlay. The results can
// sometimes be incorrect.
// TODO(matloob): Handle unsupported cases, including the following:
// - test files
// - adding test and non-test files to test variants of packages
// - determining the correct package to add given a new import path
// - creating packages that don't exist
func processGolistOverlay(cfg *Config, response *driverResponse) (modifiedPkgs, needPkgs []string, err error) {
havePkgs := make(map[string]string) // importPath -> non-test package ID
needPkgsSet := make(map[string]bool)
modifiedPkgsSet := make(map[string]bool)
for _, pkg := range response.Packages {
// This is an approximation of import path to id. This can be
// wrong for tests, vendored packages, and a number of other cases.
havePkgs[pkg.PkgPath] = pkg.ID
}
outer:
for path, contents := range cfg.Overlay {
base := filepath.Base(path)
if strings.HasSuffix(path, "_test.go") {
// Overlays don't support adding new test files yet.
// TODO(matloob): support adding new test files.
continue
}
dir := filepath.Dir(path)
for _, pkg := range response.Packages {
var dirContains, fileExists bool
for _, f := range pkg.GoFiles {
if sameFile(filepath.Dir(f), dir) {
dirContains = true
}
if filepath.Base(f) == base {
fileExists = true
}
}
if dirContains {
if !fileExists {
pkg.GoFiles = append(pkg.GoFiles, path) // TODO(matloob): should the file just be added to GoFiles?
pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, path)
modifiedPkgsSet[pkg.ID] = true
}
imports, err := extractImports(path, contents)
if err != nil {
// Let the parser or type checker report errors later.
continue outer
}
for _, imp := range imports {
_, found := pkg.Imports[imp]
if !found {
needPkgsSet[imp] = true
// TODO(matloob): Handle cases when the following block isn't correct.
// These include imports of test variants, imports of vendored packages, etc.
id, ok := havePkgs[imp]
if !ok {
id = imp
}
pkg.Imports[imp] = &Package{ID: id}
}
}
continue outer
}
}
}
needPkgs = make([]string, 0, len(needPkgsSet))
for pkg := range needPkgsSet {
needPkgs = append(needPkgs, pkg)
}
modifiedPkgs = make([]string, 0, len(modifiedPkgsSet))
for pkg := range modifiedPkgsSet {
modifiedPkgs = append(modifiedPkgs, pkg)
}
return modifiedPkgs, needPkgs, err
}
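// Editor's sketch (not part of the vendored source): a client supplies overlay
// contents through Config.Overlay; the file path and package name here are
// hypothetical.
//
//	cfg := &packages.Config{
//		Mode: packages.NeedImports,
//		Overlay: map[string][]byte{
//			"/home/user/proj/extra.go": []byte("package proj\n\nimport \"net/http\"\n\nvar _ = http.Get\n"),
//		},
//	}
//	pkgs, err := packages.Load(cfg, "./...")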
func extractImports(filename string, contents []byte) ([]string, error) {
f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset?
if err != nil {
return nil, err
}
var res []string
for _, imp := range f.Imports {
quotedPath := imp.Path.Value
path, err := strconv.Unquote(quotedPath)
if err != nil {
return nil, err
}
res = append(res, path)
}
return res, nil
}

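The overlay handling above is driven by the exported Config.Overlay field. A minimal sketch of supplying an in-memory file to packages.Load might look like the following; the file path, contents, and pattern are hypothetical.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.LoadImports,
		// Overlay maps file paths to in-memory contents; files that do not
		// exist on disk are folded into the loaded packages as described above.
		Overlay: map[string][]byte{
			"/home/user/project/extra.go": []byte("package project\n\nimport _ \"strings\"\n"),
		},
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, pkg.GoFiles)
	}
}
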
1048
vendor/golang.org/x/tools/go/packages/packages.go generated vendored Normal file

File diff suppressed because it is too large

55
vendor/golang.org/x/tools/go/packages/visit.go generated vendored Normal file
View File

@ -0,0 +1,55 @@
package packages
import (
"fmt"
"os"
"sort"
)
// Visit visits all the packages in the import graph whose roots are
// pkgs, calling the optional pre function the first time each package
// is encountered (preorder), and the optional post function after a
// package's dependencies have been visited (postorder).
// The boolean result of pre(pkg) determines whether
// the imports of package pkg are visited.
func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
seen := make(map[*Package]bool)
var visit func(*Package)
visit = func(pkg *Package) {
if !seen[pkg] {
seen[pkg] = true
if pre == nil || pre(pkg) {
paths := make([]string, 0, len(pkg.Imports))
for path := range pkg.Imports {
paths = append(paths, path)
}
sort.Strings(paths) // Imports is a map, this makes visit stable
for _, path := range paths {
visit(pkg.Imports[path])
}
}
if post != nil {
post(pkg)
}
}
}
for _, pkg := range pkgs {
visit(pkg)
}
}
// PrintErrors prints to os.Stderr the accumulated errors of all
// packages in the import graph rooted at pkgs, dependencies first.
// PrintErrors returns the number of errors printed.
func PrintErrors(pkgs []*Package) int {
var n int
Visit(pkgs, nil, func(pkg *Package) {
for _, err := range pkg.Errors {
fmt.Fprintln(os.Stderr, err)
n++
}
})
return n
}

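Visit and PrintErrors are the exported entry points here and are typically used right after a Load call; a small sketch (the ./... pattern is just an example):

package main

import (
	"fmt"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	pkgs, err := packages.Load(&packages.Config{Mode: packages.LoadImports}, "./...")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Print every package in the import graph exactly once, dependencies included.
	packages.Visit(pkgs, nil, func(pkg *packages.Package) {
		fmt.Println(pkg.ID)
	})
	// Report accumulated errors, dependencies first; non-zero means something failed.
	if packages.PrintErrors(pkgs) > 0 {
		os.Exit(1)
	}
}
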
1259
vendor/golang.org/x/tools/imports/fix.go generated vendored Normal file

File diff suppressed because it is too large

315
vendor/golang.org/x/tools/imports/imports.go generated vendored Normal file
View File

@ -0,0 +1,315 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run mkstdlib.go
// Package imports implements a Go pretty-printer (like package "go/format")
// that also adds or removes import statements as necessary.
package imports // import "golang.org/x/tools/imports"
import (
"bufio"
"bytes"
"fmt"
"go/ast"
"go/build"
"go/format"
"go/parser"
"go/printer"
"go/token"
"io"
"io/ioutil"
"regexp"
"strconv"
"strings"
"golang.org/x/tools/go/ast/astutil"
)
// Options specifies options for processing files.
type Options struct {
Fragment bool // Accept fragment of a source file (no package statement)
AllErrors bool // Report all errors (not just the first 10 on different lines)
Comments bool // Print comments (true if nil *Options provided)
TabIndent bool // Use tabs for indent (true if nil *Options provided)
TabWidth int // Tab width (8 if nil *Options provided)
FormatOnly bool // Disable the insertion and deletion of imports
}
// Process formats and adjusts imports for the provided file.
// If opt is nil the defaults are used.
//
// Note that filename's directory influences which imports can be chosen,
// so it is important that filename be accurate.
// To process data ``as if'' it were in filename, pass the data as a non-nil src.
func Process(filename string, src []byte, opt *Options) ([]byte, error) {
env := &fixEnv{GOPATH: build.Default.GOPATH, GOROOT: build.Default.GOROOT}
return process(filename, src, opt, env)
}
func process(filename string, src []byte, opt *Options, env *fixEnv) ([]byte, error) {
if opt == nil {
opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
}
if src == nil {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
src = b
}
fileSet := token.NewFileSet()
file, adjust, err := parse(fileSet, filename, src, opt)
if err != nil {
return nil, err
}
if !opt.FormatOnly {
if err := fixImports(fileSet, file, filename, env); err != nil {
return nil, err
}
}
sortImports(fileSet, file)
imps := astutil.Imports(fileSet, file)
var spacesBefore []string // import paths we need spaces before
for _, impSection := range imps {
// Within each block of contiguous imports, see if any
// import lines are in different group numbers. If so,
// we'll need to put a space between them so it's
// compatible with gofmt.
lastGroup := -1
for _, importSpec := range impSection {
importPath, _ := strconv.Unquote(importSpec.Path.Value)
groupNum := importGroup(importPath)
if groupNum != lastGroup && lastGroup != -1 {
spacesBefore = append(spacesBefore, importPath)
}
lastGroup = groupNum
}
}
printerMode := printer.UseSpaces
if opt.TabIndent {
printerMode |= printer.TabIndent
}
printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}
var buf bytes.Buffer
err = printConfig.Fprint(&buf, fileSet, file)
if err != nil {
return nil, err
}
out := buf.Bytes()
if adjust != nil {
out = adjust(src, out)
}
if len(spacesBefore) > 0 {
out, err = addImportSpaces(bytes.NewReader(out), spacesBefore)
if err != nil {
return nil, err
}
}
out, err = format.Source(out)
if err != nil {
return nil, err
}
return out, nil
}
// parse parses src, which was read from filename,
// as a Go source file or statement list.
func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
parserMode := parser.Mode(0)
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
}
// Try as whole source file.
file, err := parser.ParseFile(fset, filename, src, parserMode)
if err == nil {
return file, nil, nil
}
// If the error is that the source file didn't begin with a
// package line and we accept fragmented input, fall through to
// try as a source fragment. Stop and return on any other error.
if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
return nil, nil, err
}
// If this is a declaration list, make it a source file
// by inserting a package clause.
// Insert using a ;, not a newline, so that parse errors are on
// the correct line.
const prefix = "package main;"
psrc := append([]byte(prefix), src...)
file, err = parser.ParseFile(fset, filename, psrc, parserMode)
if err == nil {
// Gofmt will turn the ; into a \n.
// Do that ourselves now and update the file contents,
// so that positions and line numbers are correct going forward.
psrc[len(prefix)-1] = '\n'
fset.File(file.Package).SetLinesForContent(psrc)
// If a main function exists, we will assume this is a main
// package and leave the file.
if containsMainFunc(file) {
return file, nil, nil
}
adjust := func(orig, src []byte) []byte {
// Remove the package clause.
src = src[len(prefix):]
return matchSpace(orig, src)
}
return file, adjust, nil
}
// If the error is that the source file didn't begin with a
// declaration, fall through to try as a statement list.
// Stop and return on any other error.
if !strings.Contains(err.Error(), "expected declaration") {
return nil, nil, err
}
// If this is a statement list, make it a source file
// by inserting a package clause and turning the list
// into a function body. This handles expressions too.
// Insert using a ;, not a newline, so that the line numbers
// in fsrc match the ones in src.
fsrc := append(append([]byte("package p; func _() {"), src...), '}')
file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
if err == nil {
adjust := func(orig, src []byte) []byte {
// Remove the wrapping.
// Gofmt has turned the ; into a \n\n.
src = src[len("package p\n\nfunc _() {"):]
src = src[:len(src)-len("}\n")]
// Gofmt has also indented the function body one level.
// Remove that indent.
src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
return matchSpace(orig, src)
}
return file, adjust, nil
}
// Failed, and out of options.
return nil, nil, err
}
// containsMainFunc checks if a file contains a function declaration with the
// function signature 'func main()'
func containsMainFunc(file *ast.File) bool {
for _, decl := range file.Decls {
if f, ok := decl.(*ast.FuncDecl); ok {
if f.Name.Name != "main" {
continue
}
if len(f.Type.Params.List) != 0 {
continue
}
if f.Type.Results != nil && len(f.Type.Results.List) != 0 {
continue
}
return true
}
}
return false
}
func cutSpace(b []byte) (before, middle, after []byte) {
i := 0
for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') {
i++
}
j := len(b)
for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') {
j--
}
if i <= j {
return b[:i], b[i:j], b[j:]
}
return nil, nil, b[j:]
}
// matchSpace reformats src to use the same space context as orig.
// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src.
// 2) matchSpace copies the indentation of the first non-blank line in orig
// to every non-blank line in src.
// 3) matchSpace copies the trailing space from orig and uses it in place
// of src's trailing space.
func matchSpace(orig []byte, src []byte) []byte {
before, _, after := cutSpace(orig)
i := bytes.LastIndex(before, []byte{'\n'})
before, indent := before[:i+1], before[i+1:]
_, src, _ = cutSpace(src)
var b bytes.Buffer
b.Write(before)
for len(src) > 0 {
line := src
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, src = line[:i+1], line[i+1:]
} else {
src = nil
}
if len(line) > 0 && line[0] != '\n' { // not blank
b.Write(indent)
}
b.Write(line)
}
b.Write(after)
return b.Bytes()
}
var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`)
func addImportSpaces(r io.Reader, breaks []string) ([]byte, error) {
var out bytes.Buffer
in := bufio.NewReader(r)
inImports := false
done := false
for {
s, err := in.ReadString('\n')
if err == io.EOF {
break
} else if err != nil {
return nil, err
}
if !inImports && !done && strings.HasPrefix(s, "import") {
inImports = true
}
if inImports && (strings.HasPrefix(s, "var") ||
strings.HasPrefix(s, "func") ||
strings.HasPrefix(s, "const") ||
strings.HasPrefix(s, "type")) {
done = true
inImports = false
}
if inImports && len(breaks) > 0 {
if m := impLine.FindStringSubmatch(s); m != nil {
if m[1] == breaks[0] {
out.WriteByte('\n')
breaks = breaks[1:]
}
}
}
fmt.Fprint(&out, s)
}
return out.Bytes(), nil
}

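As a usage sketch, Process can be called directly on an in-memory source buffer; with a nil *Options the defaults described above (Comments, TabIndent, TabWidth 8) apply, and the missing import is inserted. The file name and source are hypothetical.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/imports"
)

func main() {
	src := []byte("package demo\n\nfunc Hello() string { return fmt.Sprintf(\"hi\") }\n")
	// filename only influences which imports can be chosen; src is non-nil,
	// so the file does not need to exist on disk.
	out, err := imports.Process("demo.go", src, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}
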
173
vendor/golang.org/x/tools/imports/mkindex.go generated vendored Normal file
View File

@ -0,0 +1,173 @@
// +build ignore
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Command mkindex creates the file "pkgindex.go" containing an index of the Go
// standard library. The file is intended to be built as part of the imports
// package, so that the package may be used in environments where a GOROOT is
// not available (such as App Engine).
package main
import (
"bytes"
"fmt"
"go/ast"
"go/build"
"go/format"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"strings"
)
var (
pkgIndex = make(map[string][]pkg)
exports = make(map[string]map[string]bool)
)
func main() {
// Don't use GOPATH.
ctx := build.Default
ctx.GOPATH = ""
// Populate pkgIndex global from GOROOT.
for _, path := range ctx.SrcDirs() {
f, err := os.Open(path)
if err != nil {
log.Print(err)
continue
}
children, err := f.Readdir(-1)
f.Close()
if err != nil {
log.Print(err)
continue
}
for _, child := range children {
if child.IsDir() {
loadPkg(path, child.Name())
}
}
}
// Populate exports global.
for _, ps := range pkgIndex {
for _, p := range ps {
e := loadExports(p.dir)
if e != nil {
exports[p.dir] = e
}
}
}
// Construct source file.
var buf bytes.Buffer
fmt.Fprint(&buf, pkgIndexHead)
fmt.Fprintf(&buf, "var pkgIndexMaster = %#v\n", pkgIndex)
fmt.Fprintf(&buf, "var exportsMaster = %#v\n", exports)
src := buf.Bytes()
// Replace main.pkg type name with pkg.
src = bytes.Replace(src, []byte("main.pkg"), []byte("pkg"), -1)
// Replace actual GOROOT with "/go".
src = bytes.Replace(src, []byte(ctx.GOROOT), []byte("/go"), -1)
// Add some line wrapping.
src = bytes.Replace(src, []byte("}, "), []byte("},\n"), -1)
src = bytes.Replace(src, []byte("true, "), []byte("true,\n"), -1)
var err error
src, err = format.Source(src)
if err != nil {
log.Fatal(err)
}
// Write out source file.
err = ioutil.WriteFile("pkgindex.go", src, 0644)
if err != nil {
log.Fatal(err)
}
}
const pkgIndexHead = `package imports
func init() {
pkgIndexOnce.Do(func() {
pkgIndex.m = pkgIndexMaster
})
loadExports = func(dir string) map[string]bool {
return exportsMaster[dir]
}
}
`
type pkg struct {
importpath string // full pkg import path, e.g. "net/http"
dir string // absolute file path to pkg directory e.g. "/usr/lib/go/src/fmt"
}
var fset = token.NewFileSet()
func loadPkg(root, importpath string) {
shortName := path.Base(importpath)
if shortName == "testdata" {
return
}
dir := filepath.Join(root, importpath)
pkgIndex[shortName] = append(pkgIndex[shortName], pkg{
importpath: importpath,
dir: dir,
})
pkgDir, err := os.Open(dir)
if err != nil {
return
}
children, err := pkgDir.Readdir(-1)
pkgDir.Close()
if err != nil {
return
}
for _, child := range children {
name := child.Name()
if name == "" {
continue
}
if c := name[0]; c == '.' || ('0' <= c && c <= '9') {
continue
}
if child.IsDir() {
loadPkg(root, filepath.Join(importpath, name))
}
}
}
func loadExports(dir string) map[string]bool {
exports := make(map[string]bool)
buildPkg, err := build.ImportDir(dir, 0)
if err != nil {
if strings.Contains(err.Error(), "no buildable Go source files in") {
return nil
}
log.Printf("could not import %q: %v", dir, err)
return nil
}
for _, file := range buildPkg.GoFiles {
f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0)
if err != nil {
log.Printf("could not parse %q: %v", file, err)
continue
}
for name := range f.Scope.Objects {
if ast.IsExported(name) {
exports[name] = true
}
}
}
return exports
}

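For orientation, the pkgindex.go file that this command emits ends up holding data shaped roughly like the sketch below; the entry is hypothetical, and GOROOT has been rewritten to /go as in the code above.

package main

import "fmt"

// pkg mirrors the unexported struct above, for illustration only.
type pkg struct {
	importpath string
	dir        string
}

func main() {
	// One plausible entry of the generated pkgIndexMaster map: keyed by the
	// package's short name, holding its import path and (rewritten) directory.
	pkgIndexMaster := map[string][]pkg{
		"tar": {{importpath: "archive/tar", dir: "/go/src/archive/tar"}},
	}
	fmt.Printf("%#v\n", pkgIndexMaster)
}
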
112
vendor/golang.org/x/tools/imports/mkstdlib.go generated vendored Normal file
View File

@ -0,0 +1,112 @@
// +build ignore
// mkstdlib generates the zstdlib.go file, containing the Go standard
// library API symbols. It's baked into the binary to avoid scanning
// GOPATH in the common case.
package main
import (
"bufio"
"bytes"
"fmt"
"go/format"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
)
func mustOpen(name string) io.Reader {
f, err := os.Open(name)
if err != nil {
log.Fatal(err)
}
return f
}
func api(base string) string {
return filepath.Join(runtime.GOROOT(), "api", base)
}
var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`)
var unsafeSyms = map[string]bool{"Alignof": true, "ArbitraryType": true, "Offsetof": true, "Pointer": true, "Sizeof": true}
func main() {
var buf bytes.Buffer
outf := func(format string, args ...interface{}) {
fmt.Fprintf(&buf, format, args...)
}
outf("// Code generated by mkstdlib.go. DO NOT EDIT.\n\n")
outf("package imports\n")
outf("var stdlib = map[string]map[string]bool{\n")
f := io.MultiReader(
mustOpen(api("go1.txt")),
mustOpen(api("go1.1.txt")),
mustOpen(api("go1.2.txt")),
mustOpen(api("go1.3.txt")),
mustOpen(api("go1.4.txt")),
mustOpen(api("go1.5.txt")),
mustOpen(api("go1.6.txt")),
mustOpen(api("go1.7.txt")),
mustOpen(api("go1.8.txt")),
mustOpen(api("go1.9.txt")),
mustOpen(api("go1.10.txt")),
mustOpen(api("go1.11.txt")),
mustOpen(api("go1.12.txt")),
)
sc := bufio.NewScanner(f)
pkgs := map[string]map[string]bool{
"unsafe": unsafeSyms,
}
paths := []string{"unsafe"}
for sc.Scan() {
l := sc.Text()
has := func(v string) bool { return strings.Contains(l, v) }
if has("struct, ") || has("interface, ") || has(", method (") {
continue
}
if m := sym.FindStringSubmatch(l); m != nil {
path, sym := m[1], m[2]
if _, ok := pkgs[path]; !ok {
pkgs[path] = map[string]bool{}
paths = append(paths, path)
}
pkgs[path][sym] = true
}
}
if err := sc.Err(); err != nil {
log.Fatal(err)
}
sort.Strings(paths)
for _, path := range paths {
outf("\t%q: map[string]bool{\n", path)
pkg := pkgs[path]
var syms []string
for sym := range pkg {
syms = append(syms, sym)
}
sort.Strings(syms)
for _, sym := range syms {
outf("\t\t%q: true,\n", sym)
}
outf("},\n")
}
outf("}\n")
fmtbuf, err := format.Source(buf.Bytes())
if err != nil {
log.Fatal(err)
}
err = ioutil.WriteFile("zstdlib.go", fmtbuf, 0666)
if err != nil {
log.Fatal(err)
}
}

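The sym regular expression above is what extracts the package path and exported identifier from each line of the api/go1.N.txt files; a quick self-contained check of what it matches (the input line follows the api file format):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	sym := regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`)
	line := "pkg archive/tar, func NewReader(io.Reader) *Reader"
	if m := sym.FindStringSubmatch(line); m != nil {
		fmt.Println(m[1], m[2]) // archive/tar NewReader
	}
}
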
355
vendor/golang.org/x/tools/imports/mod.go generated vendored Normal file
View File

@ -0,0 +1,355 @@
package imports
import (
"bytes"
"encoding/json"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/tools/internal/gopathwalk"
"golang.org/x/tools/internal/module"
)
// moduleResolver implements resolver for modules using the go command as little
// as feasible.
type moduleResolver struct {
env *fixEnv
initialized bool
main *moduleJSON
modsByModPath []*moduleJSON // All modules, ordered by # of path components in module Path...
modsByDir []*moduleJSON // ...or Dir.
}
type moduleJSON struct {
Path string // module path
Version string // module version
Versions []string // available module versions (with -versions)
Replace *moduleJSON // replaced by this module
Time *time.Time // time version was created
Update *moduleJSON // available update, if any (with -u)
Main bool // is this the main module?
Indirect bool // is this module only an indirect dependency of main module?
Dir string // directory holding files for this module, if any
GoMod string // path to go.mod file for this module, if any
Error *moduleErrorJSON // error loading module
}
type moduleErrorJSON struct {
Err string // the error itself
}
func (r *moduleResolver) init() error {
if r.initialized {
return nil
}
stdout, err := r.env.invokeGo("list", "-m", "-json", "...")
if err != nil {
return err
}
for dec := json.NewDecoder(stdout); dec.More(); {
mod := &moduleJSON{}
if err := dec.Decode(mod); err != nil {
return err
}
if mod.Dir == "" {
if Debug {
log.Printf("module %v has not been downloaded and will be ignored", mod.Path)
}
// Can't do anything with a module that's not downloaded.
continue
}
r.modsByModPath = append(r.modsByModPath, mod)
r.modsByDir = append(r.modsByDir, mod)
if mod.Main {
r.main = mod
}
}
sort.Slice(r.modsByModPath, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.modsByModPath[x].Path, "/")
}
return count(j) < count(i) // descending order
})
sort.Slice(r.modsByDir, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.modsByDir[x].Dir, "/")
}
return count(j) < count(i) // descending order
})
r.initialized = true
return nil
}
// findPackage returns the module and directory that contains the package at
// the given import path, or returns nil, "" if no module is in scope.
func (r *moduleResolver) findPackage(importPath string) (*moduleJSON, string) {
for _, m := range r.modsByModPath {
if !strings.HasPrefix(importPath, m.Path) {
continue
}
pathInModule := importPath[len(m.Path):]
pkgDir := filepath.Join(m.Dir, pathInModule)
if dirIsNestedModule(pkgDir, m) {
continue
}
pkgFiles, err := ioutil.ReadDir(pkgDir)
if err != nil {
continue
}
// A module only contains a package if it has buildable go
// files in that directory. If not, it could be provided by an
// outer module. See #29736.
for _, fi := range pkgFiles {
if ok, _ := r.env.buildContext().MatchFile(pkgDir, fi.Name()); ok {
return m, pkgDir
}
}
}
return nil, ""
}
// findModuleByDir returns the module that contains dir, or nil if no such
// module is in scope.
func (r *moduleResolver) findModuleByDir(dir string) *moduleJSON {
// This is quite tricky and may not be correct. dir could be:
// - a package in the main module.
// - a replace target underneath the main module's directory.
// - a nested module in the above.
// - a replace target somewhere totally random.
// - a nested module in the above.
// - in the mod cache.
// - in /vendor/ in -mod=vendor mode.
// - nested module? Dunno.
// Rumor has it that replace targets cannot contain other replace targets.
for _, m := range r.modsByDir {
if !strings.HasPrefix(dir, m.Dir) {
continue
}
if dirIsNestedModule(dir, m) {
continue
}
return m
}
return nil
}
// dirIsNestedModule reports if dir is contained in a nested module underneath
// mod, not actually in mod.
func dirIsNestedModule(dir string, mod *moduleJSON) bool {
if !strings.HasPrefix(dir, mod.Dir) {
return false
}
mf := findModFile(dir)
if mf == "" {
return false
}
return filepath.Dir(mf) != mod.Dir
}
func findModFile(dir string) string {
for {
f := filepath.Join(dir, "go.mod")
info, err := os.Stat(f)
if err == nil && !info.IsDir() {
return f
}
d := filepath.Dir(dir)
if len(d) >= len(dir) {
return "" // reached top of file system, no go.mod
}
dir = d
}
}
func (r *moduleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
if err := r.init(); err != nil {
return nil, err
}
names := map[string]string{}
for _, path := range importPaths {
_, packageDir := r.findPackage(path)
if packageDir == "" {
continue
}
name, err := packageDirToName(packageDir)
if err != nil {
continue
}
names[path] = name
}
return names, nil
}
func (r *moduleResolver) scan(_ references) ([]*pkg, error) {
if err := r.init(); err != nil {
return nil, err
}
// Walk GOROOT, GOPATH/pkg/mod, and the main module.
roots := []gopathwalk.Root{
{filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
}
if r.main != nil {
roots = append(roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
}
for _, p := range filepath.SplitList(r.env.GOPATH) {
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
}
// Walk replace targets, just in case they're not in any of the above.
for _, mod := range r.modsByModPath {
if mod.Replace != nil {
roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
}
}
var result []*pkg
dupCheck := make(map[string]bool)
var mu sync.Mutex
gopathwalk.Walk(roots, func(root gopathwalk.Root, dir string) {
mu.Lock()
defer mu.Unlock()
if _, dup := dupCheck[dir]; dup {
return
}
dupCheck[dir] = true
subdir := ""
if dir != root.Path {
subdir = dir[len(root.Path)+len("/"):]
}
importPath := filepath.ToSlash(subdir)
if strings.HasPrefix(importPath, "vendor/") {
// Ignore vendor dirs. If -mod=vendor is on, then things
// should mostly just work, but when it's not, vendor/
// is a mess. There's no easy way to tell if it's on.
// We can still find things in the mod cache and
// map them into /vendor when -mod=vendor is on.
return
}
switch root.Type {
case gopathwalk.RootCurrentModule:
importPath = path.Join(r.main.Path, filepath.ToSlash(subdir))
case gopathwalk.RootModuleCache:
matches := modCacheRegexp.FindStringSubmatch(subdir)
modPath, err := module.DecodePath(filepath.ToSlash(matches[1]))
if err != nil {
if Debug {
log.Printf("decoding module cache path %q: %v", subdir, err)
}
return
}
importPath = path.Join(modPath, filepath.ToSlash(matches[3]))
case gopathwalk.RootGOROOT:
importPath = subdir
}
// Check if the directory is underneath a module that's in scope.
if mod := r.findModuleByDir(dir); mod != nil {
// It is. If dir is the target of a replace directive,
// our guessed import path is wrong. Use the real one.
if mod.Dir == dir {
importPath = mod.Path
} else {
dirInMod := dir[len(mod.Dir)+len("/"):]
importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod))
}
} else {
// The package is in an unknown module. Check that it's
// not obviously impossible to import.
var modFile string
switch root.Type {
case gopathwalk.RootModuleCache:
matches := modCacheRegexp.FindStringSubmatch(subdir)
modFile = filepath.Join(matches[1], "@", matches[2], "go.mod")
default:
modFile = findModFile(dir)
}
modBytes, err := ioutil.ReadFile(modFile)
if err == nil && !strings.HasPrefix(importPath, modulePath(modBytes)) {
// The module's declared path does not match
// its expected path. It probably needs a
// replace directive we don't have.
return
}
}
// We may have discovered a package that has a different version
// in scope already. Canonicalize to that one if possible.
if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" {
dir = canonicalDir
}
result = append(result, &pkg{
importPathShort: VendorlessPath(importPath),
dir: dir,
})
}, gopathwalk.Options{Debug: Debug, ModulesEnabled: true})
return result, nil
}
// modCacheRegexp splits a path in a module cache into module, module version, and package.
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
var (
slashSlash = []byte("//")
moduleStr = []byte("module")
)
// modulePath returns the module path from the gomod file text.
// If it cannot find a module path, it returns an empty string.
// It is tolerant of unrelated problems in the go.mod file.
//
// Copied from cmd/go/internal/modfile.
func modulePath(mod []byte) string {
for len(mod) > 0 {
line := mod
mod = nil
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, mod = line[:i], line[i+1:]
}
if i := bytes.Index(line, slashSlash); i >= 0 {
line = line[:i]
}
line = bytes.TrimSpace(line)
if !bytes.HasPrefix(line, moduleStr) {
continue
}
line = line[len(moduleStr):]
n := len(line)
line = bytes.TrimSpace(line)
if len(line) == n || len(line) == 0 {
continue
}
if line[0] == '"' || line[0] == '`' {
p, err := strconv.Unquote(string(line))
if err != nil {
return "" // malformed quoted string or multiline module path
}
return p
}
return string(line)
}
return "" // missing module path
}

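modCacheRegexp above is what splits a module-cache subpath back into module path, version, and package directory; the following self-contained snippet applies the same expression to a hypothetical cache path:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	modCacheRegexp := regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
	m := modCacheRegexp.FindStringSubmatch("github.com/pkg/errors@v0.8.1/internal")
	// m[1] is the module path, m[2] the version, m[3] the directory within the module.
	fmt.Println(m[1], m[2], m[3]) // github.com/pkg/errors v0.8.1 /internal
}
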
230
vendor/golang.org/x/tools/imports/sortimports.go generated vendored Normal file
View File

@ -0,0 +1,230 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Hacked up copy of go/ast/import.go
package imports
import (
"go/ast"
"go/token"
"sort"
"strconv"
)
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
func sortImports(fset *token.FileSet, f *ast.File) {
for i, d := range f.Decls {
d, ok := d.(*ast.GenDecl)
if !ok || d.Tok != token.IMPORT {
// Not an import declaration, so we're done.
// Imports are always first.
break
}
if len(d.Specs) == 0 {
// Empty import block, remove it.
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
}
if !d.Lparen.IsValid() {
// Not a block: sorted by default.
continue
}
// Identify and sort runs of specs on successive lines.
i := 0
specs := d.Specs[:0]
for j, s := range d.Specs {
if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
// j begins a new run. End this one.
specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
i = j
}
}
specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
d.Specs = specs
// Deduping can leave a blank line before the rparen; clean that up.
if len(d.Specs) > 0 {
lastSpec := d.Specs[len(d.Specs)-1]
lastLine := fset.Position(lastSpec.Pos()).Line
if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
fset.File(d.Rparen).MergeLine(rParenLine - 1)
}
}
}
}
func importPath(s ast.Spec) string {
t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value)
if err == nil {
return t
}
return ""
}
func importName(s ast.Spec) string {
n := s.(*ast.ImportSpec).Name
if n == nil {
return ""
}
return n.Name
}
func importComment(s ast.Spec) string {
c := s.(*ast.ImportSpec).Comment
if c == nil {
return ""
}
return c.Text()
}
// collapse indicates whether prev may be removed, leaving only next.
func collapse(prev, next ast.Spec) bool {
if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
return false
}
return prev.(*ast.ImportSpec).Comment == nil
}
type posSpan struct {
Start token.Pos
End token.Pos
}
func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
// Can't short-circuit here even if specs are already sorted,
// since they might yet need deduplication.
// A lone import, however, may be safely ignored.
if len(specs) <= 1 {
return specs
}
// Record positions for specs.
pos := make([]posSpan, len(specs))
for i, s := range specs {
pos[i] = posSpan{s.Pos(), s.End()}
}
// Identify comments in this range.
// Any comment from pos[0].Start to the final line counts.
lastLine := fset.Position(pos[len(pos)-1].End).Line
cstart := len(f.Comments)
cend := len(f.Comments)
for i, g := range f.Comments {
if g.Pos() < pos[0].Start {
continue
}
if i < cstart {
cstart = i
}
if fset.Position(g.End()).Line > lastLine {
cend = i
break
}
}
comments := f.Comments[cstart:cend]
// Assign each comment to the import spec preceding it.
importComment := map[*ast.ImportSpec][]*ast.CommentGroup{}
specIndex := 0
for _, g := range comments {
for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
specIndex++
}
s := specs[specIndex].(*ast.ImportSpec)
importComment[s] = append(importComment[s], g)
}
// Sort the import specs by import path.
// Remove duplicates, when possible without data loss.
// Reassign the import paths to have the same position sequence.
// Reassign each comment to abut the end of its spec.
// Sort the comments by new position.
sort.Sort(byImportSpec(specs))
// Dedup. Thanks to our sorting, we can just consider
// adjacent pairs of imports.
deduped := specs[:0]
for i, s := range specs {
if i == len(specs)-1 || !collapse(s, specs[i+1]) {
deduped = append(deduped, s)
} else {
p := s.Pos()
fset.File(p).MergeLine(fset.Position(p).Line)
}
}
specs = deduped
// Fix up comment positions
for i, s := range specs {
s := s.(*ast.ImportSpec)
if s.Name != nil {
s.Name.NamePos = pos[i].Start
}
s.Path.ValuePos = pos[i].Start
s.EndPos = pos[i].End
nextSpecPos := pos[i].End
for _, g := range importComment[s] {
for _, c := range g.List {
c.Slash = pos[i].End
nextSpecPos = c.End()
}
}
if i < len(specs)-1 {
pos[i+1].Start = nextSpecPos
pos[i+1].End = nextSpecPos
}
}
sort.Sort(byCommentPos(comments))
// Fixup comments can insert blank lines, because import specs are on different lines.
// We remove those blank lines here by merging import spec to the first import spec line.
firstSpecLine := fset.Position(specs[0].Pos()).Line
for _, s := range specs[1:] {
p := s.Pos()
line := fset.File(p).Line(p)
for previousLine := line - 1; previousLine >= firstSpecLine; {
fset.File(p).MergeLine(previousLine)
previousLine--
}
}
return specs
}
type byImportSpec []ast.Spec // slice of *ast.ImportSpec
func (x byImportSpec) Len() int { return len(x) }
func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byImportSpec) Less(i, j int) bool {
ipath := importPath(x[i])
jpath := importPath(x[j])
igroup := importGroup(ipath)
jgroup := importGroup(jpath)
if igroup != jgroup {
return igroup < jgroup
}
if ipath != jpath {
return ipath < jpath
}
iname := importName(x[i])
jname := importName(x[j])
if iname != jname {
return iname < jname
}
return importComment(x[i]) < importComment(x[j])
}
type byCommentPos []*ast.CommentGroup
func (x byCommentPos) Len() int { return len(x) }
func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() }

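sortImports itself is unexported, but it runs on every Process call; with FormatOnly set, Process only parses, sorts, dedupes, and reformats, which makes for a compact demonstration. The source text is made up.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/imports"
)

func main() {
	src := []byte(`package demo

import (
	"strings"
	"bytes"
	"bytes"
)

var _ = bytes.MinRead
var _ = strings.TrimSpace
`)
	// FormatOnly disables import insertion/deletion; sorting and deduping still happen.
	opt := &imports.Options{FormatOnly: true, Comments: true, TabIndent: true, TabWidth: 8}
	out, err := imports.Process("demo.go", src, opt)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out) // the duplicate "bytes" import is collapsed and the block re-sorted
}
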
10302
vendor/golang.org/x/tools/imports/zstdlib.go generated vendored Normal file

File diff suppressed because it is too large

196
vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go generated vendored Normal file
View File

@ -0,0 +1,196 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package fastwalk provides a faster version of filepath.Walk for file system
// scanning tools.
package fastwalk
import (
"errors"
"os"
"path/filepath"
"runtime"
"sync"
)
// TraverseLink is used as a return value from WalkFuncs to indicate that the
// symlink named in the call may be traversed.
var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
// SkipFiles is used as a return value from WalkFuncs to indicate that the
// callback should not be called for any other files in the current directory.
// Child directories will still be traversed.
var SkipFiles = errors.New("fastwalk: skip remaining files in directory")
// Walk is a faster implementation of filepath.Walk.
//
// filepath.Walk's design necessarily calls os.Lstat on each file,
// even if the caller needs less info.
// Many tools need only the type of each file.
// On some platforms, this information is provided directly by the readdir
// system call, avoiding the need to stat each file individually.
// fastwalk_unix.go contains a fork of the syscall routines.
//
// See golang.org/issue/16399
//
// Walk walks the file tree rooted at root, calling walkFn for
// each file or directory in the tree, including root.
//
// If walkFn returns filepath.SkipDir, the directory is skipped.
//
// Unlike filepath.Walk:
// * file stat calls must be done by the user.
// The only provided metadata is the file type, which does not include
// any permission bits.
// * multiple goroutines stat the filesystem concurrently. The provided
// walkFn must be safe for concurrent use.
// * fastWalk can follow symlinks if walkFn returns the TraverseLink
// sentinel error. It is the walkFn's responsibility to prevent
// fastWalk from going into symlink cycles.
func Walk(root string, walkFn func(path string, typ os.FileMode) error) error {
// TODO(bradfitz): make numWorkers configurable? We used a
// minimum of 4 to give the kernel more info about multiple
// things we want, in hopes its I/O scheduling can take
// advantage of that. Hopefully most are in cache. Maybe 4 is
// even too low of a minimum. Profile more.
numWorkers := 4
if n := runtime.NumCPU(); n > numWorkers {
numWorkers = n
}
// Make sure to wait for all workers to finish, otherwise
// walkFn could still be called after returning. This Wait call
// runs after close(e.donec) below.
var wg sync.WaitGroup
defer wg.Wait()
w := &walker{
fn: walkFn,
enqueuec: make(chan walkItem, numWorkers), // buffered for performance
workc: make(chan walkItem, numWorkers), // buffered for performance
donec: make(chan struct{}),
// buffered for correctness & not leaking goroutines:
resc: make(chan error, numWorkers),
}
defer close(w.donec)
for i := 0; i < numWorkers; i++ {
wg.Add(1)
go w.doWork(&wg)
}
todo := []walkItem{{dir: root}}
out := 0
for {
workc := w.workc
var workItem walkItem
if len(todo) == 0 {
workc = nil
} else {
workItem = todo[len(todo)-1]
}
select {
case workc <- workItem:
todo = todo[:len(todo)-1]
out++
case it := <-w.enqueuec:
todo = append(todo, it)
case err := <-w.resc:
out--
if err != nil {
return err
}
if out == 0 && len(todo) == 0 {
// It's safe to quit here, as long as the buffered
// enqueue channel isn't also readable, which might
// happen if the worker sends both another unit of
// work and its result before the other select was
// scheduled and both w.resc and w.enqueuec were
// readable.
select {
case it := <-w.enqueuec:
todo = append(todo, it)
default:
return nil
}
}
}
}
}
// doWork reads directories as instructed (via workc) and runs the
// user's callback function.
func (w *walker) doWork(wg *sync.WaitGroup) {
defer wg.Done()
for {
select {
case <-w.donec:
return
case it := <-w.workc:
select {
case <-w.donec:
return
case w.resc <- w.walk(it.dir, !it.callbackDone):
}
}
}
}
type walker struct {
fn func(path string, typ os.FileMode) error
donec chan struct{} // closed on fastWalk's return
workc chan walkItem // to workers
enqueuec chan walkItem // from workers
resc chan error // from workers
}
type walkItem struct {
dir string
callbackDone bool // callback already called; don't do it again
}
func (w *walker) enqueue(it walkItem) {
select {
case w.enqueuec <- it:
case <-w.donec:
}
}
func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error {
joined := dirName + string(os.PathSeparator) + baseName
if typ == os.ModeDir {
w.enqueue(walkItem{dir: joined})
return nil
}
err := w.fn(joined, typ)
if typ == os.ModeSymlink {
if err == TraverseLink {
// Set callbackDone so we don't call it twice for both the
// symlink-as-symlink and the symlink-as-directory later:
w.enqueue(walkItem{dir: joined, callbackDone: true})
return nil
}
if err == filepath.SkipDir {
// Permit SkipDir on symlinks too.
return nil
}
}
return err
}
func (w *walker) walk(root string, runUserCallback bool) error {
if runUserCallback {
err := w.fn(root, os.ModeDir)
if err == filepath.SkipDir {
return nil
}
if err != nil {
return err
}
}
return readDir(root, w.onDirEnt)
}

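fastwalk lives under internal/ and is therefore only importable from within golang.org/x/tools itself; purely as a sketch, a caller inside that module would drive Walk roughly like this (the root path is an example):

package main

import (
	"fmt"
	"os"
	"strings"

	"golang.org/x/tools/internal/fastwalk"
)

func main() {
	err := fastwalk.Walk("/usr/local/go/src", func(path string, typ os.FileMode) error {
		if typ == os.ModeSymlink {
			// Ask fastwalk to descend into the symlink target; avoiding
			// symlink cycles is the callback's responsibility.
			return fastwalk.TraverseLink
		}
		if typ.IsRegular() && strings.HasSuffix(path, ".go") {
			fmt.Println(path)
			// One Go file is enough; skip the remaining files in this directory.
			return fastwalk.SkipFiles
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
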
View File

@ -0,0 +1,13 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd
package fastwalk
import "syscall"
func direntInode(dirent *syscall.Dirent) uint64 {
return uint64(dirent.Fileno)
}

View File

@ -0,0 +1,14 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin
// +build !appengine
package fastwalk
import "syscall"
func direntInode(dirent *syscall.Dirent) uint64 {
return uint64(dirent.Ino)
}

View File

@ -0,0 +1,13 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd openbsd netbsd
package fastwalk
import "syscall"
func direntNamlen(dirent *syscall.Dirent) uint64 {
return uint64(dirent.Namlen)
}

View File

@ -0,0 +1,29 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
// +build !appengine
package fastwalk
import (
"bytes"
"syscall"
"unsafe"
)
func direntNamlen(dirent *syscall.Dirent) uint64 {
const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name))
nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
const nameBufLen = uint16(len(nameBuf))
limit := dirent.Reclen - fixedHdr
if limit > nameBufLen {
limit = nameBufLen
}
nameLen := bytes.IndexByte(nameBuf[:limit], 0)
if nameLen < 0 {
panic("failed to find terminating 0 byte in dirent")
}
return uint64(nameLen)
}

View File

@ -0,0 +1,37 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd
package fastwalk
import (
"io/ioutil"
"os"
)
// readDir calls fn for each directory entry in dirName.
// It does not descend into directories or follow symlinks.
// If fn returns a non-nil error, readDir returns with that error
// immediately.
func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
fis, err := ioutil.ReadDir(dirName)
if err != nil {
return err
}
skipFiles := false
for _, fi := range fis {
if fi.Mode().IsRegular() && skipFiles {
continue
}
if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil {
if err == SkipFiles {
skipFiles = true
continue
}
return err
}
}
return nil
}

View File

@ -0,0 +1,127 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd netbsd
// +build !appengine
package fastwalk
import (
"fmt"
"os"
"syscall"
"unsafe"
)
const blockSize = 8 << 10
// unknownFileMode is a sentinel (and bogus) os.FileMode
// value used to represent a syscall.DT_UNKNOWN Dirent.Type.
const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice
func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
fd, err := syscall.Open(dirName, 0, 0)
if err != nil {
return &os.PathError{Op: "open", Path: dirName, Err: err}
}
defer syscall.Close(fd)
// The buffer must be at least a block long.
buf := make([]byte, blockSize) // stack-allocated; doesn't escape
bufp := 0 // starting read position in buf
nbuf := 0 // end valid data in buf
skipFiles := false
for {
if bufp >= nbuf {
bufp = 0
nbuf, err = syscall.ReadDirent(fd, buf)
if err != nil {
return os.NewSyscallError("readdirent", err)
}
if nbuf <= 0 {
return nil
}
}
consumed, name, typ := parseDirEnt(buf[bufp:nbuf])
bufp += consumed
if name == "" || name == "." || name == ".." {
continue
}
// Fallback for filesystems (like old XFS) that don't
// support Dirent.Type and have DT_UNKNOWN (0) there
// instead.
if typ == unknownFileMode {
fi, err := os.Lstat(dirName + "/" + name)
if err != nil {
// It got deleted in the meantime.
if os.IsNotExist(err) {
continue
}
return err
}
typ = fi.Mode() & os.ModeType
}
if skipFiles && typ.IsRegular() {
continue
}
if err := fn(dirName, name, typ); err != nil {
if err == SkipFiles {
skipFiles = true
continue
}
return err
}
}
}
func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) {
// golang.org/issue/15653
dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v {
panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v))
}
if len(buf) < int(dirent.Reclen) {
panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen))
}
consumed = int(dirent.Reclen)
if direntInode(dirent) == 0 { // File absent in directory.
return
}
switch dirent.Type {
case syscall.DT_REG:
typ = 0
case syscall.DT_DIR:
typ = os.ModeDir
case syscall.DT_LNK:
typ = os.ModeSymlink
case syscall.DT_BLK:
typ = os.ModeDevice
case syscall.DT_FIFO:
typ = os.ModeNamedPipe
case syscall.DT_SOCK:
typ = os.ModeSocket
case syscall.DT_UNKNOWN:
typ = unknownFileMode
default:
// Skip weird things.
// It's probably a DT_WHT (http://lwn.net/Articles/325369/)
// or something. Revisit if/when this package is moved outside
// of goimports. goimports only cares about regular files,
// symlinks, and directories.
return
}
nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
nameLen := direntNamlen(dirent)
// Special cases for common things:
if nameLen == 1 && nameBuf[0] == '.' {
name = "."
} else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' {
name = ".."
} else {
name = string(nameBuf[:nameLen])
}
return
}

250
vendor/golang.org/x/tools/internal/gopathwalk/walk.go generated vendored Normal file
View File

@ -0,0 +1,250 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gopathwalk is like filepath.Walk but specialized for finding Go
// packages, particularly in $GOPATH and $GOROOT.
package gopathwalk
import (
"bufio"
"bytes"
"fmt"
"go/build"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"golang.org/x/tools/internal/fastwalk"
)
// Options controls the behavior of a Walk call.
type Options struct {
Debug bool // Enable debug logging
ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules.
}
// RootType indicates the type of a Root.
type RootType int
const (
RootUnknown RootType = iota
RootGOROOT
RootGOPATH
RootCurrentModule
RootModuleCache
RootOther
)
// A Root is a starting point for a Walk.
type Root struct {
Path string
Type RootType
}
// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible.
func SrcDirsRoots(ctx *build.Context) []Root {
var roots []Root
roots = append(roots, Root{filepath.Join(ctx.GOROOT, "src"), RootGOROOT})
for _, p := range filepath.SplitList(ctx.GOPATH) {
roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH})
}
return roots
}
// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
// For each package found, add will be called (concurrently) with the absolute
// paths of the containing source directory and the package directory.
func Walk(roots []Root, add func(root Root, dir string), opts Options) {
for _, root := range roots {
walkDir(root, add, opts)
}
}
func walkDir(root Root, add func(Root, string), opts Options) {
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
if opts.Debug {
log.Printf("skipping nonexistant directory: %v", root.Path)
}
return
}
if opts.Debug {
log.Printf("scanning %s", root.Path)
}
w := &walker{
root: root,
add: add,
opts: opts,
}
w.init()
if err := fastwalk.Walk(root.Path, w.walk); err != nil {
log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err)
}
if opts.Debug {
log.Printf("scanned %s", root.Path)
}
}
// walker is the callback for fastwalk.Walk.
type walker struct {
root Root // The source directory to scan.
add func(Root, string) // The callback that will be invoked for every possible Go package dir.
opts Options // Options passed to Walk by the user.
ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
}
// init initializes the walker based on its Options.
func (w *walker) init() {
var ignoredPaths []string
if w.root.Type == RootModuleCache {
ignoredPaths = []string{"cache"}
}
if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH {
ignoredPaths = w.getIgnoredDirs(w.root.Path)
ignoredPaths = append(ignoredPaths, "v", "mod")
}
for _, p := range ignoredPaths {
full := filepath.Join(w.root.Path, p)
if fi, err := os.Stat(full); err == nil {
w.ignoredDirs = append(w.ignoredDirs, fi)
if w.opts.Debug {
log.Printf("Directory added to ignore list: %s", full)
}
} else if w.opts.Debug {
log.Printf("Error statting ignored directory: %v", err)
}
}
}
// getIgnoredDirs reads an optional config file at <path>/.goimportsignore
// of relative directories to ignore when scanning for go files.
// The provided path is one of the $GOPATH entries with "src" appended.
func (w *walker) getIgnoredDirs(path string) []string {
file := filepath.Join(path, ".goimportsignore")
slurp, err := ioutil.ReadFile(file)
if w.opts.Debug {
if err != nil {
log.Print(err)
} else {
log.Printf("Read %s", file)
}
}
if err != nil {
return nil
}
var ignoredDirs []string
bs := bufio.NewScanner(bytes.NewReader(slurp))
for bs.Scan() {
line := strings.TrimSpace(bs.Text())
if line == "" || strings.HasPrefix(line, "#") {
continue
}
ignoredDirs = append(ignoredDirs, line)
}
return ignoredDirs
}
func (w *walker) shouldSkipDir(fi os.FileInfo) bool {
for _, ignoredDir := range w.ignoredDirs {
if os.SameFile(fi, ignoredDir) {
return true
}
}
return false
}
func (w *walker) walk(path string, typ os.FileMode) error {
dir := filepath.Dir(path)
if typ.IsRegular() {
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
// Doesn't make sense to have regular files
// directly in your $GOPATH/src or $GOROOT/src.
return fastwalk.SkipFiles
}
if !strings.HasSuffix(path, ".go") {
return nil
}
w.add(w.root, dir)
return fastwalk.SkipFiles
}
if typ == os.ModeDir {
base := filepath.Base(path)
if base == "" || base[0] == '.' || base[0] == '_' ||
base == "testdata" ||
(w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
(!w.opts.ModulesEnabled && base == "node_modules") {
return filepath.SkipDir
}
fi, err := os.Lstat(path)
if err == nil && w.shouldSkipDir(fi) {
return filepath.SkipDir
}
return nil
}
if typ == os.ModeSymlink {
base := filepath.Base(path)
if strings.HasPrefix(base, ".#") {
// Emacs noise.
return nil
}
fi, err := os.Lstat(path)
if err != nil {
// Just ignore it.
return nil
}
if w.shouldTraverse(dir, fi) {
return fastwalk.TraverseLink
}
}
return nil
}
// shouldTraverse reports whether the symlink fi, found in dir,
// should be followed. It makes sure symlinks were never visited
// before to avoid symlink loops.
func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool {
path := filepath.Join(dir, fi.Name())
target, err := filepath.EvalSymlinks(path)
if err != nil {
return false
}
ts, err := os.Stat(target)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return false
}
if !ts.IsDir() {
return false
}
if w.shouldSkipDir(ts) {
return false
}
// Check for symlink loops by statting each directory component
// and seeing if any are the same file as ts.
for {
parent := filepath.Dir(path)
if parent == path {
// Made it to the root without seeing a cycle.
// Use this symlink.
return true
}
parentInfo, err := os.Stat(parent)
if err != nil {
return false
}
if os.SameFile(ts, parentInfo) {
// Cycle. Don't traverse.
return false
}
path = parent
}
}

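gopathwalk is likewise internal; within x/tools, a scan over the classic GOROOT/GOPATH roots would look roughly like this sketch (remember that add runs concurrently):

package main

import (
	"fmt"
	"go/build"
	"sync"

	"golang.org/x/tools/internal/gopathwalk"
)

func main() {
	var mu sync.Mutex
	roots := gopathwalk.SrcDirsRoots(&build.Default)
	gopathwalk.Walk(roots, func(root gopathwalk.Root, dir string) {
		// add is invoked from multiple goroutines, so guard shared state.
		mu.Lock()
		defer mu.Unlock()
		fmt.Println(dir)
	}, gopathwalk.Options{Debug: false, ModulesEnabled: false})
}
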
540
vendor/golang.org/x/tools/internal/module/module.go generated vendored Normal file
View File

@ -0,0 +1,540 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package module defines the module.Version type
// along with support code.
package module
// IMPORTANT NOTE
//
// This file essentially defines the set of valid import paths for the go command.
// There are many subtle considerations, including Unicode ambiguity,
// security, network, and file system representations.
//
// This file also defines the set of valid module path and version combinations,
// another topic with many subtle considerations.
//
// Changes to the semantics in this file require approval from rsc.
import (
"fmt"
"sort"
"strings"
"unicode"
"unicode/utf8"
"golang.org/x/tools/internal/semver"
)
// A Version is defined by a module path and version pair.
type Version struct {
Path string
// Version is usually a semantic version in canonical form.
// There are two exceptions to this general rule.
// First, the top-level target of a build has no specific version
// and uses Version = "".
// Second, during MVS calculations the version "none" is used
// to represent the decision to take no version of a given module.
Version string `json:",omitempty"`
}
// Check checks that a given module path, version pair is valid.
// In addition to the path being a valid module path
// and the version being a valid semantic version,
// the two must correspond.
// For example, the path "yaml/v2" only corresponds to
// semantic versions beginning with "v2.".
func Check(path, version string) error {
if err := CheckPath(path); err != nil {
return err
}
if !semver.IsValid(version) {
return fmt.Errorf("malformed semantic version %v", version)
}
_, pathMajor, _ := SplitPathVersion(path)
if !MatchPathMajor(version, pathMajor) {
if pathMajor == "" {
pathMajor = "v0 or v1"
}
if pathMajor[0] == '.' { // .v1
pathMajor = pathMajor[1:]
}
return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor)
}
return nil
}
// firstPathOK reports whether r can appear in the first element of a module path.
// The first element of the path must be an LDH domain name, at least for now.
// To avoid case ambiguity, the domain name must be entirely lower case.
func firstPathOK(r rune) bool {
return r == '-' || r == '.' ||
'0' <= r && r <= '9' ||
'a' <= r && r <= 'z'
}
// pathOK reports whether r can appear in an import path element.
// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
// This matches what "go get" has historically recognized in import paths.
// TODO(rsc): We would like to allow Unicode letters, but that requires additional
// care in the safe encoding (see note below).
func pathOK(r rune) bool {
if r < utf8.RuneSelf {
return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' ||
'0' <= r && r <= '9' ||
'A' <= r && r <= 'Z' ||
'a' <= r && r <= 'z'
}
return false
}
// fileNameOK reports whether r can appear in a file name.
// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
// If we expand the set of allowed characters here, we have to
// work harder at detecting potential case-folding and normalization collisions.
// See note about "safe encoding" below.
func fileNameOK(r rune) bool {
if r < utf8.RuneSelf {
// Entire set of ASCII punctuation, from which we remove characters:
// ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
// We disallow some shell special characters: " ' * < > ? ` |
// (Note that some of those are disallowed by the Windows file system as well.)
// We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
// We allow spaces (U+0020) in file names.
const allowed = "!#$%&()+,-.=@[]^_{}~ "
if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
return true
}
for i := 0; i < len(allowed); i++ {
if rune(allowed[i]) == r {
return true
}
}
return false
}
// It may be OK to add more ASCII punctuation here, but only carefully.
// For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
return unicode.IsLetter(r)
}
// CheckPath checks that a module path is valid.
func CheckPath(path string) error {
if err := checkPath(path, false); err != nil {
return fmt.Errorf("malformed module path %q: %v", path, err)
}
i := strings.Index(path, "/")
if i < 0 {
i = len(path)
}
if i == 0 {
return fmt.Errorf("malformed module path %q: leading slash", path)
}
if !strings.Contains(path[:i], ".") {
return fmt.Errorf("malformed module path %q: missing dot in first path element", path)
}
if path[0] == '-' {
return fmt.Errorf("malformed module path %q: leading dash in first path element", path)
}
for _, r := range path[:i] {
if !firstPathOK(r) {
return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r)
}
}
if _, _, ok := SplitPathVersion(path); !ok {
return fmt.Errorf("malformed module path %q: invalid version", path)
}
return nil
}
// CheckImportPath checks that an import path is valid.
func CheckImportPath(path string) error {
if err := checkPath(path, false); err != nil {
return fmt.Errorf("malformed import path %q: %v", path, err)
}
return nil
}
// checkPath checks that a general path is valid.
// It returns an error describing why but not mentioning path.
// Because these checks apply to both module paths and import paths,
// the caller is expected to add the "malformed ___ path %q: " prefix.
// fileName indicates whether the final element of the path is a file name
// (as opposed to a directory name).
func checkPath(path string, fileName bool) error {
if !utf8.ValidString(path) {
return fmt.Errorf("invalid UTF-8")
}
if path == "" {
return fmt.Errorf("empty string")
}
if strings.Contains(path, "..") {
return fmt.Errorf("double dot")
}
if strings.Contains(path, "//") {
return fmt.Errorf("double slash")
}
if path[len(path)-1] == '/' {
return fmt.Errorf("trailing slash")
}
elemStart := 0
for i, r := range path {
if r == '/' {
if err := checkElem(path[elemStart:i], fileName); err != nil {
return err
}
elemStart = i + 1
}
}
if err := checkElem(path[elemStart:], fileName); err != nil {
return err
}
return nil
}
// checkElem checks whether an individual path element is valid.
// fileName indicates whether the element is a file name (not a directory name).
func checkElem(elem string, fileName bool) error {
if elem == "" {
return fmt.Errorf("empty path element")
}
if strings.Count(elem, ".") == len(elem) {
return fmt.Errorf("invalid path element %q", elem)
}
if elem[0] == '.' && !fileName {
return fmt.Errorf("leading dot in path element")
}
if elem[len(elem)-1] == '.' {
return fmt.Errorf("trailing dot in path element")
}
charOK := pathOK
if fileName {
charOK = fileNameOK
}
for _, r := range elem {
if !charOK(r) {
return fmt.Errorf("invalid char %q", r)
}
}
// Windows disallows a bunch of path elements, sadly.
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
short := elem
if i := strings.Index(short, "."); i >= 0 {
short = short[:i]
}
for _, bad := range badWindowsNames {
if strings.EqualFold(bad, short) {
return fmt.Errorf("disallowed path element %q", elem)
}
}
return nil
}
// CheckFilePath checks whether a slash-separated file path is valid.
func CheckFilePath(path string) error {
if err := checkPath(path, true); err != nil {
return fmt.Errorf("malformed file path %q: %v", path, err)
}
return nil
}
// badWindowsNames are the reserved file path elements on Windows.
// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
var badWindowsNames = []string{
"CON",
"PRN",
"AUX",
"NUL",
"COM1",
"COM2",
"COM3",
"COM4",
"COM5",
"COM6",
"COM7",
"COM8",
"COM9",
"LPT1",
"LPT2",
"LPT3",
"LPT4",
"LPT5",
"LPT6",
"LPT7",
"LPT8",
"LPT9",
}
// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path
// and version is either empty or "/vN" for N >= 2.
// As a special case, gopkg.in paths are recognized directly;
// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
if strings.HasPrefix(path, "gopkg.in/") {
return splitGopkgIn(path)
}
i := len(path)
dot := false
for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
if path[i-1] == '.' {
dot = true
}
i--
}
if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' {
return path, "", true
}
prefix, pathMajor = path[:i-2], path[i-2:]
if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" {
return path, "", false
}
return prefix, pathMajor, true
}
// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths.
func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
if !strings.HasPrefix(path, "gopkg.in/") {
return path, "", false
}
i := len(path)
if strings.HasSuffix(path, "-unstable") {
i -= len("-unstable")
}
for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') {
i--
}
if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' {
// All gopkg.in paths must end in vN for some N.
return path, "", false
}
prefix, pathMajor = path[:i-2], path[i-2:]
if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" {
return path, "", false
}
return prefix, pathMajor, true
}
// MatchPathMajor reports whether the semantic version v
// matches the path major version pathMajor.
func MatchPathMajor(v, pathMajor string) bool {
if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
}
if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
// Allow an old bug in pseudo-versions that generated a v0.0.0- pseudo-version for gopkg.in .v1 modules.
// For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
return true
}
m := semver.Major(v)
if pathMajor == "" {
return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible"
}
return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:]
}
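// matchPathMajorSketch is an unused, illustrative sketch of MatchPathMajor on
// a few made-up inputs; expected results are in the comments.
func matchPathMajorSketch() {
	a := MatchPathMajor("v2.3.1", "/v2")           // true: major versions agree
	b := MatchPathMajor("v1.5.0", "")              // true: an empty suffix admits v0 and v1
	c := MatchPathMajor("v2.0.0+incompatible", "") // true: +incompatible bypasses the suffix rule
	d := MatchPathMajor("v3.0.0", "/v2")           // false: v3 requires a /v3 suffix
	_, _, _, _ = a, b, c, d
}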
// CanonicalVersion returns the canonical form of the version string v.
// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible".
func CanonicalVersion(v string) string {
cv := semver.Canonical(v)
if semver.Build(v) == "+incompatible" {
cv += "+incompatible"
}
return cv
}
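// canonicalVersionSketch is an unused, illustrative sketch of CanonicalVersion;
// expected results are in the comments.
func canonicalVersionSketch() {
	a := CanonicalVersion("v1.2")                // "v1.2.0": missing .PATCH filled in
	b := CanonicalVersion("v1.2.3+meta")         // "v1.2.3": ordinary build metadata is dropped
	c := CanonicalVersion("v2.0.0+incompatible") // "v2.0.0+incompatible": the special suffix is preserved
	_, _, _ = a, b, c
}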
// Sort sorts the list by Path, breaking ties by comparing Versions.
func Sort(list []Version) {
sort.Slice(list, func(i, j int) bool {
mi := list[i]
mj := list[j]
if mi.Path != mj.Path {
return mi.Path < mj.Path
}
// To help go.sum formatting, allow version/file.
// Compare semver prefix by semver rules,
// file by string order.
vi := mi.Version
vj := mj.Version
var fi, fj string
if k := strings.Index(vi, "/"); k >= 0 {
vi, fi = vi[:k], vi[k:]
}
if k := strings.Index(vj, "/"); k >= 0 {
vj, fj = vj[:k], vj[k:]
}
if vi != vj {
return semver.Compare(vi, vj) < 0
}
return fi < fj
})
}
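// sortSketch is an unused, illustrative sketch of the ordering produced by
// Sort, assuming the Version struct defined earlier in this file (Path and
// Version fields); the module paths are made up.
func sortSketch() {
	list := []Version{
		{Path: "example.com/b", Version: "v1.0.0"},
		{Path: "example.com/a", Version: "v1.10.0"},
		{Path: "example.com/a", Version: "v1.2.0/go.mod"},
		{Path: "example.com/a", Version: "v1.2.0"},
	}
	Sort(list)
	// Resulting order:
	//   example.com/a v1.2.0        (semver prefix compared by semver rules)
	//   example.com/a v1.2.0/go.mod (same version; the "/go.mod" file suffix sorts after "")
	//   example.com/a v1.10.0       (v1.10.0 > v1.2.0 numerically, not lexically)
	//   example.com/b v1.0.0        (paths compared first)
	_ = list
}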
// Safe encodings
//
// Module paths appear as substrings of file system paths
// (in the download cache) and of web server URLs in the proxy protocol.
// In general we cannot rely on file systems to be case-sensitive,
// nor can we rely on web servers, since they read from file systems.
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
// and rsc.io/quote separate. Windows and macOS don't.
// Instead, we must never require two different casings of a file path.
// Because we want the download cache to match the proxy protocol,
// and because we want the proxy protocol to be possible to serve
// from a tree of static files (which might be stored on a case-insensitive
// file system), the proxy protocol must never require two different casings
// of a URL path either.
//
// One possibility would be to make the safe encoding be the lowercase
// hexadecimal encoding of the actual path bytes. This would avoid ever
// needing different casings of a file path, but it would be fairly illegible
// to most programmers when those paths appeared in the file system
// (including in file paths in compiler errors and stack traces),
// in web server logs, and so on. Instead, we want a safe encoding that
// leaves most paths unaltered.
//
// The safe encoding is this:
// replace every uppercase letter with an exclamation mark
// followed by the letter's lowercase equivalent.
//
// For example,
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus
//
// Import paths that avoid upper-case letters are left unchanged.
// Note that because import paths are ASCII-only and avoid various
// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
// and avoids the same problematic punctuation.
//
// Import paths have never allowed exclamation marks, so there is no
// need to define how to encode a literal !.
//
// Although paths are disallowed from using Unicode (see pathOK above),
// the eventual plan is to allow Unicode letters as well, to assume that
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
// the !-for-uppercase convention. Note however that not all runes that
// are different but case-fold equivalent are an upper/lower pair.
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
// are considered to case-fold to each other. When we do add Unicode
// letters, we must not assume that upper/lower are the only case-equivalent pairs.
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
//
// Also, it would be nice to allow Unicode marks as well as letters,
// but marks include combining marks, and then we must deal not
// only with case folding but also normalization: both U+00E9 ('é')
// and U+0065 U+0301 ('e' followed by combining acute accent)
// look the same on the page and are treated by some file systems
// as the same path. If we do allow Unicode marks in paths, there
// must be some kind of normalization to allow only one canonical
// encoding of any character used in an import path.
// EncodePath returns the safe encoding of the given module path.
// It fails if the module path is invalid.
func EncodePath(path string) (encoding string, err error) {
if err := CheckPath(path); err != nil {
return "", err
}
return encodeString(path)
}
// EncodeVersion returns the safe encoding of the given module version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func EncodeVersion(v string) (encoding string, err error) {
if err := checkElem(v, true); err != nil || strings.Contains(v, "!") {
return "", fmt.Errorf("disallowed version string %q", v)
}
return encodeString(v)
}
func encodeString(s string) (encoding string, err error) {
haveUpper := false
for _, r := range s {
if r == '!' || r >= utf8.RuneSelf {
// This should be disallowed by CheckPath, but diagnose anyway.
// The correctness of the encoding loop below depends on it.
return "", fmt.Errorf("internal error: inconsistency in EncodePath")
}
if 'A' <= r && r <= 'Z' {
haveUpper = true
}
}
if !haveUpper {
return s, nil
}
var buf []byte
for _, r := range s {
if 'A' <= r && r <= 'Z' {
buf = append(buf, '!', byte(r+'a'-'A'))
} else {
buf = append(buf, byte(r))
}
}
return string(buf), nil
}
// DecodePath returns the module path of the given safe encoding.
// It fails if the encoding is invalid or encodes an invalid path.
func DecodePath(encoding string) (path string, err error) {
path, ok := decodeString(encoding)
if !ok {
return "", fmt.Errorf("invalid module path encoding %q", encoding)
}
if err := CheckPath(path); err != nil {
return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err)
}
return path, nil
}
// DecodeVersion returns the version string for the given safe encoding.
// It fails if the encoding is invalid or encodes an invalid version.
// Versions are allowed to be in non-semver form but must be valid file names
// and not contain exclamation marks.
func DecodeVersion(encoding string) (v string, err error) {
v, ok := decodeString(encoding)
if !ok {
return "", fmt.Errorf("invalid version encoding %q", encoding)
}
if err := checkElem(v, true); err != nil {
return "", fmt.Errorf("disallowed version string %q", v)
}
return v, nil
}
func decodeString(encoding string) (string, bool) {
var buf []byte
bang := false
for _, r := range encoding {
if r >= utf8.RuneSelf {
return "", false
}
if bang {
bang = false
if r < 'a' || 'z' < r {
return "", false
}
buf = append(buf, byte(r+'A'-'a'))
continue
}
if r == '!' {
bang = true
continue
}
if 'A' <= r && r <= 'Z' {
return "", false
}
buf = append(buf, byte(r))
}
if bang {
return "", false
}
return string(buf), true
}
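// encodingRoundTripSketch is an unused, illustrative sketch of the safe
// encoding described above; it relies on CheckPath, defined earlier in this
// file, accepting the example path. Expected results are in the comments.
func encodingRoundTripSketch() {
	enc, _ := EncodePath("github.com/Azure/azure-sdk-for-go")
	// enc == "github.com/!azure/azure-sdk-for-go"
	dec, _ := DecodePath(enc)
	// dec == "github.com/Azure/azure-sdk-for-go"
	_, _ = enc, dec
}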

388
vendor/golang.org/x/tools/internal/semver/semver.go generated vendored Normal file

@ -0,0 +1,388 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package semver implements comparison of semantic version strings.
// In this package, semantic version strings must begin with a leading "v",
// as in "v1.0.0".
//
// The general form of a semantic version string accepted by this package is
//
// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
//
// where square brackets indicate optional parts of the syntax;
// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
// using only alphanumeric characters and hyphens; and
// all-numeric PRERELEASE identifiers must not have leading zeros.
//
// This package follows Semantic Versioning 2.0.0 (see semver.org)
// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
package semver
// parsed returns the parsed form of a semantic version string.
type parsed struct {
major string
minor string
patch string
short string
prerelease string
build string
err string
}
// IsValid reports whether v is a valid semantic version string.
func IsValid(v string) bool {
_, ok := parse(v)
return ok
}
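// isValidSketch is an unused, illustrative sketch of the grammar described in
// the package comment; expected results are in the comments.
func isValidSketch() {
	a := IsValid("v1.2.3")            // true
	b := IsValid("v1.2")              // true: shorthand for v1.2.0
	c := IsValid("v1.2.3-rc.1+build") // true: prerelease and build metadata allowed
	d := IsValid("1.2.3")             // false: the leading "v" is required
	e := IsValid("v1.02.3")           // false: extra leading zero in MINOR
	_, _, _, _, _ = a, b, c, d, e
}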
// Canonical returns the canonical formatting of the semantic version v.
// It fills in any missing .MINOR or .PATCH and discards build metadata.
// Two semantic versions compare equal only if their canonical formattings
// are identical strings.
// The canonical invalid semantic version is the empty string.
func Canonical(v string) string {
p, ok := parse(v)
if !ok {
return ""
}
if p.build != "" {
return v[:len(v)-len(p.build)]
}
if p.short != "" {
return v + p.short
}
return v
}
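// canonicalSketch is an unused, illustrative sketch of Canonical; expected
// results are in the comments.
func canonicalSketch() {
	a := Canonical("v1.2")        // "v1.2.0": missing .PATCH filled in
	b := Canonical("v1.2.3+meta") // "v1.2.3": build metadata discarded
	c := Canonical("1.2.3")       // "": invalid without the "v" prefix
	_, _, _ = a, b, c
}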
// Major returns the major version prefix of the semantic version v.
// For example, Major("v2.1.0") == "v2".
// If v is an invalid semantic version string, Major returns the empty string.
func Major(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return v[:1+len(pv.major)]
}
// MajorMinor returns the major.minor version prefix of the semantic version v.
// For example, MajorMinor("v2.1.0") == "v2.1".
// If v is an invalid semantic version string, MajorMinor returns the empty string.
func MajorMinor(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
i := 1 + len(pv.major)
if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
return v[:j]
}
return v[:i] + "." + pv.minor
}
// Prerelease returns the prerelease suffix of the semantic version v.
// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
// If v is an invalid semantic version string, Prerelease returns the empty string.
func Prerelease(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return pv.prerelease
}
// Build returns the build suffix of the semantic version v.
// For example, Build("v2.1.0+meta") == "+meta".
// If v is an invalid semantic version string, Build returns the empty string.
func Build(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return pv.build
}
// Compare returns an integer comparing two versions according to
// semantic version precedence.
// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
//
// An invalid semantic version string is considered less than a valid one.
// All invalid semantic version strings compare equal to each other.
func Compare(v, w string) int {
pv, ok1 := parse(v)
pw, ok2 := parse(w)
if !ok1 && !ok2 {
return 0
}
if !ok1 {
return -1
}
if !ok2 {
return +1
}
if c := compareInt(pv.major, pw.major); c != 0 {
return c
}
if c := compareInt(pv.minor, pw.minor); c != 0 {
return c
}
if c := compareInt(pv.patch, pw.patch); c != 0 {
return c
}
return comparePrerelease(pv.prerelease, pw.prerelease)
}
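// compareSketch is an unused, illustrative sketch of the precedence rules
// implemented by Compare; expected results are in the comments.
func compareSketch() {
	a := Compare("v1.0.0-alpha", "v1.0.0")            // -1: a prerelease sorts before its release
	b := Compare("v1.0.0-alpha.2", "v1.0.0-alpha.11") // -1: numeric identifiers compare numerically
	c := Compare("v2", "v2.0.0")                      // 0: shorthands canonicalize to the same version
	d := Compare("bogus", "v1.0.0")                   // -1: an invalid version is less than any valid one
	_, _, _, _ = a, b, c, d
}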
// Max canonicalizes its arguments and then returns the version string
// that compares greater.
func Max(v, w string) string {
v = Canonical(v)
w = Canonical(w)
if Compare(v, w) > 0 {
return v
}
return w
}
func parse(v string) (p parsed, ok bool) {
if v == "" || v[0] != 'v' {
p.err = "missing v prefix"
return
}
p.major, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad major version"
return
}
if v == "" {
p.minor = "0"
p.patch = "0"
p.short = ".0.0"
return
}
if v[0] != '.' {
p.err = "bad minor prefix"
ok = false
return
}
p.minor, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad minor version"
return
}
if v == "" {
p.patch = "0"
p.short = ".0"
return
}
if v[0] != '.' {
p.err = "bad patch prefix"
ok = false
return
}
p.patch, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad patch version"
return
}
if len(v) > 0 && v[0] == '-' {
p.prerelease, v, ok = parsePrerelease(v)
if !ok {
p.err = "bad prerelease"
return
}
}
if len(v) > 0 && v[0] == '+' {
p.build, v, ok = parseBuild(v)
if !ok {
p.err = "bad build"
return
}
}
if v != "" {
p.err = "junk on end"
ok = false
return
}
ok = true
return
}
func parseInt(v string) (t, rest string, ok bool) {
if v == "" {
return
}
if v[0] < '0' || '9' < v[0] {
return
}
i := 1
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
if v[0] == '0' && i != 1 {
return
}
return v[:i], v[i:], true
}
func parsePrerelease(v string) (t, rest string, ok bool) {
// "A pre-release version MAY be denoted by appending a hyphen and
// a series of dot separated identifiers immediately following the patch version.
// Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
// Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
if v == "" || v[0] != '-' {
return
}
i := 1
start := 1
for i < len(v) && v[i] != '+' {
if !isIdentChar(v[i]) && v[i] != '.' {
return
}
if v[i] == '.' {
if start == i || isBadNum(v[start:i]) {
return
}
start = i + 1
}
i++
}
if start == i || isBadNum(v[start:i]) {
return
}
return v[:i], v[i:], true
}
func parseBuild(v string) (t, rest string, ok bool) {
if v == "" || v[0] != '+' {
return
}
i := 1
start := 1
for i < len(v) {
if !isIdentChar(v[i]) && v[i] != '.' {
return
}
if v[i] == '.' {
if start == i {
return
}
start = i + 1
}
i++
}
if start == i {
return
}
return v[:i], v[i:], true
}
func isIdentChar(c byte) bool {
return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
}
func isBadNum(v string) bool {
i := 0
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
return i == len(v) && i > 1 && v[0] == '0'
}
func isNum(v string) bool {
i := 0
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
return i == len(v)
}
func compareInt(x, y string) int {
if x == y {
return 0
}
if len(x) < len(y) {
return -1
}
if len(x) > len(y) {
return +1
}
if x < y {
return -1
} else {
return +1
}
}
func comparePrerelease(x, y string) int {
// "When major, minor, and patch are equal, a pre-release version has
// lower precedence than a normal version.
// Example: 1.0.0-alpha < 1.0.0.
// Precedence for two pre-release versions with the same major, minor,
// and patch version MUST be determined by comparing each dot separated
// identifier from left to right until a difference is found as follows:
// identifiers consisting of only digits are compared numerically and
// identifiers with letters or hyphens are compared lexically in ASCII
// sort order. Numeric identifiers always have lower precedence than
// non-numeric identifiers. A larger set of pre-release fields has a
// higher precedence than a smaller set, if all of the preceding
// identifiers are equal.
// Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
// 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
if x == y {
return 0
}
if x == "" {
return +1
}
if y == "" {
return -1
}
for x != "" && y != "" {
x = x[1:] // skip - or .
y = y[1:] // skip - or .
var dx, dy string
dx, x = nextIdent(x)
dy, y = nextIdent(y)
if dx != dy {
ix := isNum(dx)
iy := isNum(dy)
if ix != iy {
if ix {
return -1
} else {
return +1
}
}
if ix {
if len(dx) < len(dy) {
return -1
}
if len(dx) > len(dy) {
return +1
}
}
if dx < dy {
return -1
} else {
return +1
}
}
}
if x == "" {
return -1
} else {
return +1
}
}
func nextIdent(x string) (dx, rest string) {
i := 0
for i < len(x) && x[i] != '.' {
i++
}
return x[:i], x[i:]
}

202
vendor/k8s.io/code-generator/LICENSE generated vendored Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

10
vendor/k8s.io/code-generator/cmd/client-gen/OWNERS generated vendored Normal file

@ -0,0 +1,10 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- lavalamp
- wojtek-t
- caesarxuchao
reviewers:
- lavalamp
- wojtek-t
- caesarxuchao


@ -0,0 +1,4 @@
See [generating-clientset.md](https://git.k8s.io/community/contributors/devel/sig-api-machinery/generating-clientset.md)
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/staging/src/k8s.io/code-generator/client-gen/README.md?pixel)]()


@ -0,0 +1,120 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package args
import (
"fmt"
"path"
"github.com/spf13/pflag"
"k8s.io/gengo/args"
"k8s.io/code-generator/cmd/client-gen/types"
codegenutil "k8s.io/code-generator/pkg/util"
)
var DefaultInputDirs = []string{}
// CustomArgs is a wrapper for arguments to client-gen.
type CustomArgs struct {
// A sorted list of group versions to generate. For each of them the package path is found
// in GroupVersionToInputPath.
Groups []types.GroupVersions
// Overrides for which types should be included in the client.
IncludedTypesOverrides map[types.GroupVersion][]string
// ClientsetName is the name of the clientset to be generated. It's
// populated from command-line arguments.
ClientsetName string
// ClientsetAPIPath is the default API HTTP path for generated clients.
ClientsetAPIPath string
// ClientsetOnly determines if we should generate the clients for groups and
// types along with the clientset. It's populated from command-line
// arguments.
ClientsetOnly bool
// FakeClient determines if client-gen generates the fake clients.
FakeClient bool
}
func NewDefaults() (*args.GeneratorArgs, *CustomArgs) {
genericArgs := args.Default().WithoutDefaultFlagParsing()
customArgs := &CustomArgs{
ClientsetName: "internalclientset",
ClientsetAPIPath: "/apis",
ClientsetOnly: false,
FakeClient: true,
}
genericArgs.CustomArgs = customArgs
genericArgs.InputDirs = DefaultInputDirs
if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 {
genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/clientset")
}
return genericArgs, customArgs
}
func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet, inputBase string) {
gvsBuilder := NewGroupVersionsBuilder(&ca.Groups)
pflag.Var(NewGVPackagesValue(gvsBuilder, nil), "input", "group/versions that client-gen will generate clients for. At most one version per group is allowed. Specified in the format \"group1/version1,group2/version2...\".")
pflag.Var(NewGVTypesValue(&ca.IncludedTypesOverrides, []string{}), "included-types-overrides", "list of group/version/type for which client should be generated. By default, client is generated for all types which have genclient in types.go. This overrides that. For each groupVersion in this list, only the types mentioned here will be included. The default check of genclient will be used for other group versions.")
pflag.Var(NewInputBasePathValue(gvsBuilder, inputBase), "input-base", "base path to look for the api group.")
pflag.StringVarP(&ca.ClientsetName, "clientset-name", "n", ca.ClientsetName, "the name of the generated clientset package.")
pflag.StringVarP(&ca.ClientsetAPIPath, "clientset-api-path", "", ca.ClientsetAPIPath, "the value of default API HTTP path, starting with / and without trailing /.")
pflag.BoolVar(&ca.ClientsetOnly, "clientset-only", ca.ClientsetOnly, "when set, client-gen only generates the clientset shell, without generating the individual typed clients")
pflag.BoolVar(&ca.FakeClient, "fake-clientset", ca.FakeClient, "when set, client-gen will generate the fake clientset that can be used in tests")
// support old flags
fs.SetNormalizeFunc(mapFlagName("clientset-path", "output-package", fs.GetNormalizeFunc()))
}
func Validate(genericArgs *args.GeneratorArgs) error {
customArgs := genericArgs.CustomArgs.(*CustomArgs)
if len(genericArgs.OutputPackagePath) == 0 {
return fmt.Errorf("output package cannot be empty")
}
if len(customArgs.ClientsetName) == 0 {
return fmt.Errorf("clientset name cannot be empty")
}
if len(customArgs.ClientsetAPIPath) == 0 {
return fmt.Errorf("clientset API path cannot be empty")
}
return nil
}
// GroupVersionPackages returns a map from GroupVersion to the package with the types.go.
func (ca *CustomArgs) GroupVersionPackages() map[types.GroupVersion]string {
res := map[types.GroupVersion]string{}
for _, pkg := range ca.Groups {
for _, v := range pkg.Versions {
res[types.GroupVersion{Group: pkg.Group, Version: v.Version}] = v.Package
}
}
return res
}
func mapFlagName(from, to string, old func(fs *pflag.FlagSet, name string) pflag.NormalizedName) func(fs *pflag.FlagSet, name string) pflag.NormalizedName {
return func(fs *pflag.FlagSet, name string) pflag.NormalizedName {
if name == from {
name = to
}
return old(fs, name)
}
}


@ -0,0 +1,183 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package args
import (
"bytes"
"encoding/csv"
"flag"
"path"
"sort"
"strings"
"k8s.io/code-generator/cmd/client-gen/types"
)
type inputBasePathValue struct {
builder *groupVersionsBuilder
}
var _ flag.Value = &inputBasePathValue{}
func NewInputBasePathValue(builder *groupVersionsBuilder, def string) *inputBasePathValue {
v := &inputBasePathValue{
builder: builder,
}
v.Set(def)
return v
}
func (s *inputBasePathValue) Set(val string) error {
s.builder.importBasePath = val
return s.builder.update()
}
func (s *inputBasePathValue) Type() string {
return "string"
}
func (s *inputBasePathValue) String() string {
return s.builder.importBasePath
}
type gvPackagesValue struct {
builder *groupVersionsBuilder
groups []string
changed bool
}
func NewGVPackagesValue(builder *groupVersionsBuilder, def []string) *gvPackagesValue {
gvp := new(gvPackagesValue)
gvp.builder = builder
if def != nil {
if err := gvp.set(def); err != nil {
panic(err)
}
}
return gvp
}
var _ flag.Value = &gvPackagesValue{}
func (s *gvPackagesValue) set(vs []string) error {
if s.changed {
s.groups = append(s.groups, vs...)
} else {
s.groups = append([]string(nil), vs...)
}
s.builder.groups = s.groups
return s.builder.update()
}
func (s *gvPackagesValue) Set(val string) error {
vs, err := readAsCSV(val)
if err != nil {
return err
}
if err := s.set(vs); err != nil {
return err
}
s.changed = true
return nil
}
func (s *gvPackagesValue) Type() string {
return "stringSlice"
}
func (s *gvPackagesValue) String() string {
str, _ := writeAsCSV(s.groups)
return "[" + str + "]"
}
type groupVersionsBuilder struct {
value *[]types.GroupVersions
groups []string
importBasePath string
}
func NewGroupVersionsBuilder(groups *[]types.GroupVersions) *groupVersionsBuilder {
return &groupVersionsBuilder{
value: groups,
}
}
func (p *groupVersionsBuilder) update() error {
var seenGroups = make(map[types.Group]*types.GroupVersions)
for _, v := range p.groups {
pth, gvString := parsePathGroupVersion(v)
gv, err := types.ToGroupVersion(gvString)
if err != nil {
return err
}
versionPkg := types.PackageVersion{Package: path.Join(p.importBasePath, pth, gv.Group.NonEmpty(), gv.Version.String()), Version: gv.Version}
if group, ok := seenGroups[gv.Group]; ok {
seenGroups[gv.Group].Versions = append(group.Versions, versionPkg)
} else {
seenGroups[gv.Group] = &types.GroupVersions{
PackageName: gv.Group.NonEmpty(),
Group: gv.Group,
Versions: []types.PackageVersion{versionPkg},
}
}
}
var groupNames []string
for groupName := range seenGroups {
groupNames = append(groupNames, groupName.String())
}
sort.Strings(groupNames)
*p.value = []types.GroupVersions{}
for _, groupName := range groupNames {
*p.value = append(*p.value, *seenGroups[types.Group(groupName)])
}
return nil
}
func parsePathGroupVersion(pgvString string) (gvPath string, gvString string) {
subs := strings.Split(pgvString, "/")
length := len(subs)
switch length {
case 0, 1, 2:
return "", pgvString
default:
return strings.Join(subs[:length-2], "/"), strings.Join(subs[length-2:], "/")
}
}
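// parsePathGroupVersionSketch is an unused, illustrative sketch of how the
// --input flag values described in args.go are split into an optional path
// prefix and a group/version; the inputs are made up and the expected results
// are noted in the comments.
func parsePathGroupVersionSketch() {
	p1, gv1 := parsePathGroupVersion("k8s.io/api/apps/v1") // "k8s.io/api", "apps/v1"
	p2, gv2 := parsePathGroupVersion("apps/v1")            // "", "apps/v1"
	_, _, _, _ = p1, gv1, p2, gv2
}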
func readAsCSV(val string) ([]string, error) {
if val == "" {
return []string{}, nil
}
stringReader := strings.NewReader(val)
csvReader := csv.NewReader(stringReader)
return csvReader.Read()
}
func writeAsCSV(vals []string) (string, error) {
b := &bytes.Buffer{}
w := csv.NewWriter(b)
err := w.Write(vals)
if err != nil {
return "", err
}
w.Flush()
return strings.TrimSuffix(b.String(), "\n"), nil
}


@ -0,0 +1,110 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package args
import (
"flag"
"fmt"
"strings"
"k8s.io/code-generator/cmd/client-gen/types"
)
type gvTypeValue struct {
gvToTypes *map[types.GroupVersion][]string
changed bool
}
func NewGVTypesValue(gvToTypes *map[types.GroupVersion][]string, def []string) *gvTypeValue {
gvt := new(gvTypeValue)
gvt.gvToTypes = gvToTypes
if def != nil {
if err := gvt.set(def); err != nil {
panic(err)
}
}
return gvt
}
var _ flag.Value = &gvTypeValue{}
func (s *gvTypeValue) set(vs []string) error {
if !s.changed {
*s.gvToTypes = map[types.GroupVersion][]string{}
}
for _, input := range vs {
gvString, typeStr, err := parseGroupVersionType(input)
if err != nil {
return err
}
gv, err := types.ToGroupVersion(gvString)
if err != nil {
return err
}
types, ok := (*s.gvToTypes)[gv]
if !ok {
types = []string{}
}
types = append(types, typeStr)
(*s.gvToTypes)[gv] = types
}
return nil
}
func (s *gvTypeValue) Set(val string) error {
vs, err := readAsCSV(val)
if err != nil {
return err
}
if err := s.set(vs); err != nil {
return err
}
s.changed = true
return nil
}
func (s *gvTypeValue) Type() string {
return "stringSlice"
}
func (s *gvTypeValue) String() string {
strs := make([]string, 0, len(*s.gvToTypes))
for gv, ts := range *s.gvToTypes {
for _, t := range ts {
strs = append(strs, gv.Group.String()+"/"+gv.Version.String()+"/"+t)
}
}
str, _ := writeAsCSV(strs)
return "[" + str + "]"
}
func parseGroupVersionType(gvtString string) (gvString string, typeStr string, err error) {
invalidFormatErr := fmt.Errorf("invalid value: %s, should be of the form group/version/type", gvtString)
subs := strings.Split(gvtString, "/")
length := len(subs)
switch length {
case 2:
// gvtString of the form group/type, e.g. api/Service,extensions/ReplicaSet
return subs[0] + "/", subs[1], nil
case 3:
return strings.Join(subs[:length-1], "/"), subs[length-1], nil
default:
return "", "", invalidFormatErr
}
}
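// parseGroupVersionTypeSketch is an unused, illustrative sketch of the
// group/version/type format accepted by --included-types-overrides; the
// inputs are made up and the expected results are noted in the comments.
func parseGroupVersionTypeSketch() {
	gv1, t1, _ := parseGroupVersionType("apps/v1/Deployment") // "apps/v1", "Deployment", nil
	gv2, t2, _ := parseGroupVersionType("api/Service")        // "api/", "Service", nil (legacy group/type form)
	_, _, badErr := parseGroupVersionType("Deployment")       // error: not of the form group/version/type
	_, _, _, _, _ = gv1, t1, gv2, t2, badErr
}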


@ -0,0 +1,403 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package generators has the generators for the client-gen utility.
package generators
import (
"path/filepath"
"strings"
clientgenargs "k8s.io/code-generator/cmd/client-gen/args"
"k8s.io/code-generator/cmd/client-gen/generators/fake"
"k8s.io/code-generator/cmd/client-gen/generators/scheme"
"k8s.io/code-generator/cmd/client-gen/generators/util"
"k8s.io/code-generator/cmd/client-gen/path"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
codegennamer "k8s.io/code-generator/pkg/namer"
"k8s.io/gengo/args"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/klog"
)
// NameSystems returns the name system used by the generators in this package.
func NameSystems() namer.NameSystems {
pluralExceptions := map[string]string{
"Endpoints": "Endpoints",
}
lowercaseNamer := namer.NewAllLowercasePluralNamer(pluralExceptions)
publicNamer := &ExceptionNamer{
Exceptions: map[string]string{
// these exceptions are used to deconflict the generated code
// you can add an entry for your fully qualified type, like the example below,
// to generate a name that doesn't conflict with your group.
// "k8s.io/apis/events/v1beta1.Event": "EventResource"
},
KeyFunc: func(t *types.Type) string {
return t.Name.Package + "." + t.Name.Name
},
Delegate: namer.NewPublicNamer(0),
}
privateNamer := &ExceptionNamer{
Exceptions: map[string]string{
// these exceptions are used to deconflict the generated code
// you can add an entry for your fully qualified type, like the example below,
// to generate a name that doesn't conflict with your group.
// "k8s.io/apis/events/v1beta1.Event": "eventResource"
},
KeyFunc: func(t *types.Type) string {
return t.Name.Package + "." + t.Name.Name
},
Delegate: namer.NewPrivateNamer(0),
}
publicPluralNamer := &ExceptionNamer{
Exceptions: map[string]string{
// these exceptions are used to deconflict the generated code
// you can add an entry for your fully qualified type, like the example below,
// to generate a name that doesn't conflict with your group.
// "k8s.io/apis/events/v1beta1.Event": "EventResource"
},
KeyFunc: func(t *types.Type) string {
return t.Name.Package + "." + t.Name.Name
},
Delegate: namer.NewPublicPluralNamer(pluralExceptions),
}
privatePluralNamer := &ExceptionNamer{
Exceptions: map[string]string{
// you can add an entry for your fully qualified type, like the examples below,
// to generate a name that doesn't conflict with your group.
// "k8s.io/apis/events/v1beta1.Event": "eventResource"
// these exceptions are used to deconflict the generated code
"k8s.io/apis/events/v1beta1.Event": "eventResources",
"k8s.io/kubernetes/pkg/apis/events.Event": "eventResources",
},
KeyFunc: func(t *types.Type) string {
return t.Name.Package + "." + t.Name.Name
},
Delegate: namer.NewPrivatePluralNamer(pluralExceptions),
}
return namer.NameSystems{
"singularKind": namer.NewPublicNamer(0),
"public": publicNamer,
"private": privateNamer,
"raw": namer.NewRawNamer("", nil),
"publicPlural": publicPluralNamer,
"privatePlural": privatePluralNamer,
"allLowercasePlural": lowercaseNamer,
"resource": codegennamer.NewTagOverrideNamer("resourceName", lowercaseNamer),
}
}
// ExceptionNamer allows you to specify exceptional cases with exact names. This allows you to have control
// for handling various conflicts, like group and resource names for instance.
type ExceptionNamer struct {
Exceptions map[string]string
KeyFunc func(*types.Type) string
Delegate namer.Namer
}
// Name provides the requested name for a type.
func (n *ExceptionNamer) Name(t *types.Type) string {
key := n.KeyFunc(t)
if exception, ok := n.Exceptions[key]; ok {
return exception
}
return n.Delegate.Name(t)
}
// DefaultNameSystem returns the default name system for ordering the types to be
// processed by the generators in this package.
func DefaultNameSystem() string {
return "public"
}
func packageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetPackage string, groupPackageName string, groupGoName string, apiPath string, srcTreePath string, inputPackage string, boilerplate []byte) generator.Package {
groupVersionClientPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty()))
return &generator.DefaultPackage{
PackageName: strings.ToLower(gv.Version.NonEmpty()),
PackagePath: groupVersionClientPackage,
HeaderText: boilerplate,
PackageDocumentation: []byte(
`// This package has the automatically generated typed clients.
`),
// GeneratorFunc returns a list of generators. Each generator makes a
// single file.
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
generators = []generator.Generator{
// Always generate a "doc.go" file.
generator.DefaultGen{OptionalName: "doc"},
}
// Since we want a file per type that we generate a client for, we
// have to provide a function for this.
for _, t := range typeList {
generators = append(generators, &genClientForType{
DefaultGen: generator.DefaultGen{
OptionalName: strings.ToLower(c.Namers["private"].Name(t)),
},
outputPackage: groupVersionClientPackage,
clientsetPackage: clientsetPackage,
group: gv.Group.NonEmpty(),
version: gv.Version.String(),
groupGoName: groupGoName,
typeToMatch: t,
imports: generator.NewImportTracker(),
})
}
generators = append(generators, &genGroup{
DefaultGen: generator.DefaultGen{
OptionalName: groupPackageName + "_client",
},
outputPackage: groupVersionClientPackage,
inputPackage: inputPackage,
clientsetPackage: clientsetPackage,
group: gv.Group.NonEmpty(),
version: gv.Version.String(),
groupGoName: groupGoName,
apiPath: apiPath,
types: typeList,
imports: generator.NewImportTracker(),
})
expansionFileName := "generated_expansion"
generators = append(generators, &genExpansion{
groupPackagePath: filepath.Join(srcTreePath, groupVersionClientPackage),
DefaultGen: generator.DefaultGen{
OptionalName: expansionFileName,
},
types: typeList,
})
return generators
},
FilterFunc: func(c *generator.Context, t *types.Type) bool {
return util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient
},
}
}
func packageForClientset(customArgs *clientgenargs.CustomArgs, clientsetPackage string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Package {
return &generator.DefaultPackage{
PackageName: customArgs.ClientsetName,
PackagePath: clientsetPackage,
HeaderText: boilerplate,
PackageDocumentation: []byte(
`// This package has the automatically generated clientset.
`),
// GeneratorFunc returns a list of generators. Each generator generates a
// single file.
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
generators = []generator.Generator{
// Always generate a "doc.go" file.
generator.DefaultGen{OptionalName: "doc"},
&genClientset{
DefaultGen: generator.DefaultGen{
OptionalName: "clientset",
},
groups: customArgs.Groups,
groupGoNames: groupGoNames,
clientsetPackage: clientsetPackage,
outputPackage: customArgs.ClientsetName,
imports: generator.NewImportTracker(),
},
}
return generators
},
}
}
func packageForScheme(customArgs *clientgenargs.CustomArgs, clientsetPackage string, srcTreePath string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Package {
schemePackage := filepath.Join(clientsetPackage, "scheme")
// create runtime.Registry for internal client because it has to know about group versions
internalClient := false
NextGroup:
for _, group := range customArgs.Groups {
for _, v := range group.Versions {
if v.String() == "" {
internalClient = true
break NextGroup
}
}
}
return &generator.DefaultPackage{
PackageName: "scheme",
PackagePath: schemePackage,
HeaderText: boilerplate,
PackageDocumentation: []byte(
`// This package contains the scheme of the automatically generated clientset.
`),
// GeneratorFunc returns a list of generators. Each generator generates a
// single file.
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
generators = []generator.Generator{
// Always generate a "doc.go" file.
generator.DefaultGen{OptionalName: "doc"},
&scheme.GenScheme{
DefaultGen: generator.DefaultGen{
OptionalName: "register",
},
InputPackages: customArgs.GroupVersionPackages(),
OutputPackage: schemePackage,
OutputPath: filepath.Join(srcTreePath, schemePackage),
Groups: customArgs.Groups,
GroupGoNames: groupGoNames,
ImportTracker: generator.NewImportTracker(),
CreateRegistry: internalClient,
},
}
return generators
},
}
}
// applyGroupOverrides applies group name overrides to each package, if applicable. If there is a
// comment of the form "// +groupName=somegroup" or "// +groupName=somegroup.foo.bar.io", use the
// first field (somegroup) as the name of the group in Go code, e.g. as the func name in a clientset.
//
// If the first field of the groupName is not unique within the clientset, use "// +groupGoName=SomeUniqueShortName"
// in the package comment to pick a unique Go identifier instead.
func applyGroupOverrides(universe types.Universe, customArgs *clientgenargs.CustomArgs) {
// Create a map from "old GV" to "new GV" so we know what changes we need to make.
changes := make(map[clientgentypes.GroupVersion]clientgentypes.GroupVersion)
for gv, inputDir := range customArgs.GroupVersionPackages() {
p := universe.Package(inputDir)
if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil {
newGV := clientgentypes.GroupVersion{
Group: clientgentypes.Group(override[0]),
Version: gv.Version,
}
changes[gv] = newGV
}
}
// Modify customArgs.Groups based on the groupName overrides.
newGroups := make([]clientgentypes.GroupVersions, 0, len(customArgs.Groups))
for _, gvs := range customArgs.Groups {
gv := clientgentypes.GroupVersion{
Group: gvs.Group,
Version: gvs.Versions[0].Version, // we only need a version, and the first will do
}
if newGV, ok := changes[gv]; ok {
// There's an override, so use it.
newGVS := clientgentypes.GroupVersions{
PackageName: gvs.PackageName,
Group: newGV.Group,
Versions: gvs.Versions,
}
newGroups = append(newGroups, newGVS)
} else {
// No override.
newGroups = append(newGroups, gvs)
}
}
customArgs.Groups = newGroups
}
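// For illustration, a hypothetical doc.go in a target API package (the group
// name and package name are made up) carrying the tags that applyGroupOverrides
// above and the groupGoName lookup below read from the package comment:
//
//	// +groupName=example.foo.io
//	// +groupGoName=Example
//	package v1alpha1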
// Packages makes the client package definition.
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
boilerplate, err := arguments.LoadGoBoilerplate()
if err != nil {
klog.Fatalf("Failed loading boilerplate: %v", err)
}
customArgs, ok := arguments.CustomArgs.(*clientgenargs.CustomArgs)
if !ok {
klog.Fatalf("cannot convert arguments.CustomArgs to clientgenargs.CustomArgs")
}
includedTypesOverrides := customArgs.IncludedTypesOverrides
applyGroupOverrides(context.Universe, customArgs)
gvToTypes := map[clientgentypes.GroupVersion][]*types.Type{}
groupGoNames := make(map[clientgentypes.GroupVersion]string)
for gv, inputDir := range customArgs.GroupVersionPackages() {
p := context.Universe.Package(path.Vendorless(inputDir))
// If there's a comment of the form "// +groupGoName=SomeUniqueShortName", use that as
// the Go group identifier in CamelCase. It defaults to the first segment of
// the group name (everything before the first dot), converted to CamelCase.
groupGoNames[gv] = namer.IC(strings.Split(gv.Group.NonEmpty(), ".")[0])
if override := types.ExtractCommentTags("+", p.Comments)["groupGoName"]; override != nil {
groupGoNames[gv] = namer.IC(override[0])
}
// Packages are indexed with the vendor prefix stripped.
for n, t := range p.Types {
// filter out types which are not included in the user-specified overrides.
typesOverride, ok := includedTypesOverrides[gv]
if ok {
found := false
for _, typeStr := range typesOverride {
if typeStr == n {
found = true
break
}
}
if !found {
continue
}
} else {
// User has not specified any override for this group version.
// filter out types which don't have genclient.
if tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)); !tags.GenerateClient {
continue
}
}
if _, found := gvToTypes[gv]; !found {
gvToTypes[gv] = []*types.Type{}
}
gvToTypes[gv] = append(gvToTypes[gv], t)
}
}
var packageList []generator.Package
clientsetPackage := filepath.Join(arguments.OutputPackagePath, customArgs.ClientsetName)
packageList = append(packageList, packageForClientset(customArgs, clientsetPackage, groupGoNames, boilerplate))
packageList = append(packageList, packageForScheme(customArgs, clientsetPackage, arguments.OutputBase, groupGoNames, boilerplate))
if customArgs.FakeClient {
packageList = append(packageList, fake.PackageForClientset(customArgs, clientsetPackage, groupGoNames, boilerplate))
}
// If --clientset-only=true, we don't regenerate the individual typed clients.
if customArgs.ClientsetOnly {
return generator.Packages(packageList)
}
orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)}
gvPackages := customArgs.GroupVersionPackages()
for _, group := range customArgs.Groups {
for _, version := range group.Versions {
gv := clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}
types := gvToTypes[gv]
inputPath := gvPackages[gv]
packageList = append(packageList, packageForGroup(gv, orderer.OrderTypes(types), clientsetPackage, group.PackageName, groupGoNames[gv], customArgs.ClientsetAPIPath, arguments.OutputBase, inputPath, boilerplate))
if customArgs.FakeClient {
packageList = append(packageList, fake.PackageForGroup(gv, orderer.OrderTypes(types), clientsetPackage, group.PackageName, groupGoNames[gv], inputPath, boilerplate))
}
}
}
return generator.Packages(packageList)
}


@ -0,0 +1,130 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"path/filepath"
"strings"
"k8s.io/gengo/generator"
"k8s.io/gengo/types"
clientgenargs "k8s.io/code-generator/cmd/client-gen/args"
scheme "k8s.io/code-generator/cmd/client-gen/generators/scheme"
"k8s.io/code-generator/cmd/client-gen/generators/util"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
)
func PackageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, clientsetPackage string, groupPackageName string, groupGoName string, inputPackage string, boilerplate []byte) generator.Package {
outputPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty()), "fake")
// TODO: should make this a function, called by here and in client-generator.go
realClientPackage := filepath.Join(clientsetPackage, "typed", strings.ToLower(groupPackageName), strings.ToLower(gv.Version.NonEmpty()))
return &generator.DefaultPackage{
PackageName: "fake",
PackagePath: outputPackage,
HeaderText: boilerplate,
PackageDocumentation: []byte(
`// Package fake has the automatically generated clients.
`),
// GeneratorFunc returns a list of generators. Each generator makes a
// single file.
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
generators = []generator.Generator{
// Always generate a "doc.go" file.
generator.DefaultGen{OptionalName: "doc"},
}
// Since we want a file per type that we generate a client for, we
// have to provide a function for this.
for _, t := range typeList {
generators = append(generators, &genFakeForType{
DefaultGen: generator.DefaultGen{
OptionalName: "fake_" + strings.ToLower(c.Namers["private"].Name(t)),
},
outputPackage: outputPackage,
inputPackage: inputPackage,
group: gv.Group.NonEmpty(),
version: gv.Version.String(),
groupGoName: groupGoName,
typeToMatch: t,
imports: generator.NewImportTracker(),
})
}
generators = append(generators, &genFakeForGroup{
DefaultGen: generator.DefaultGen{
OptionalName: "fake_" + groupPackageName + "_client",
},
outputPackage: outputPackage,
realClientPackage: realClientPackage,
group: gv.Group.NonEmpty(),
version: gv.Version.String(),
groupGoName: groupGoName,
types: typeList,
imports: generator.NewImportTracker(),
})
return generators
},
FilterFunc: func(c *generator.Context, t *types.Type) bool {
return util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient
},
}
}
func PackageForClientset(customArgs *clientgenargs.CustomArgs, clientsetPackage string, groupGoNames map[clientgentypes.GroupVersion]string, boilerplate []byte) generator.Package {
return &generator.DefaultPackage{
// TODO: we'll generate fake clientsets for different releases in the future.
// Package name and path are hard coded for now.
PackageName: "fake",
PackagePath: filepath.Join(clientsetPackage, "fake"),
HeaderText: boilerplate,
PackageDocumentation: []byte(
`// This package has the automatically generated fake clientset.
`),
// GeneratorFunc returns a list of generators. Each generator generates a
// single file.
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
generators = []generator.Generator{
// Always generate a "doc.go" file.
generator.DefaultGen{OptionalName: "doc"},
&genClientset{
DefaultGen: generator.DefaultGen{
OptionalName: "clientset_generated",
},
groups: customArgs.Groups,
groupGoNames: groupGoNames,
fakeClientsetPackage: clientsetPackage,
outputPackage: "fake",
imports: generator.NewImportTracker(),
realClientsetPackage: clientsetPackage,
},
&scheme.GenScheme{
DefaultGen: generator.DefaultGen{
OptionalName: "register",
},
InputPackages: customArgs.GroupVersionPackages(),
OutputPackage: clientsetPackage,
Groups: customArgs.Groups,
GroupGoNames: groupGoNames,
ImportTracker: generator.NewImportTracker(),
PrivateScheme: true,
},
}
return generators
},
}
}
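The FilterFunc above only admits types that carry the client-gen tags. For illustration, a hypothetical API type (group example.dev, type Widget, not part of this commit) opts in with marker comments like these:

package v1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Widget is a hypothetical resource; the +genclient tag is what makes
// MustParseClientGenTags(...).GenerateClient return true for it, and the
// Status member triggers UpdateStatus generation unless +genclient:noStatus
// is added.
type Widget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   WidgetSpec   `json:"spec,omitempty"`
	Status WidgetStatus `json:"status,omitempty"`
}

// WidgetSpec and WidgetStatus are stubs for the example.
type WidgetSpec struct{}
type WidgetStatus struct{}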


@ -0,0 +1,169 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"fmt"
"io"
"path/filepath"
"strings"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
)
// genClientset generates a package for a clientset.
type genClientset struct {
generator.DefaultGen
groups []clientgentypes.GroupVersions
groupGoNames map[clientgentypes.GroupVersion]string
fakeClientsetPackage string
outputPackage string
imports namer.ImportTracker
clientsetGenerated bool
// the import path of the generated real clientset.
realClientsetPackage string
}
var _ generator.Generator = &genClientset{}
func (g *genClientset) Namers(c *generator.Context) namer.NameSystems {
return namer.NameSystems{
"raw": namer.NewRawNamer(g.outputPackage, g.imports),
}
}
// We only want to call GenerateType() once.
func (g *genClientset) Filter(c *generator.Context, t *types.Type) bool {
ret := !g.clientsetGenerated
g.clientsetGenerated = true
return ret
}
func (g *genClientset) Imports(c *generator.Context) (imports []string) {
imports = append(imports, g.imports.ImportLines()...)
for _, group := range g.groups {
for _, version := range group.Versions {
groupClientPackage := filepath.Join(g.fakeClientsetPackage, "typed", strings.ToLower(group.PackageName), strings.ToLower(version.NonEmpty()))
fakeGroupClientPackage := filepath.Join(groupClientPackage, "fake")
groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}])
imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), groupClientPackage))
imports = append(imports, fmt.Sprintf("fake%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), fakeGroupClientPackage))
}
}
// the package that has the clientset Interface
imports = append(imports, fmt.Sprintf("clientset \"%s\"", g.realClientsetPackage))
	// imports for the code in the common template
imports = append(imports,
"k8s.io/client-go/testing",
"k8s.io/client-go/discovery",
"fakediscovery \"k8s.io/client-go/discovery/fake\"",
"k8s.io/apimachinery/pkg/runtime",
"k8s.io/apimachinery/pkg/watch",
)
return
}
func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
// TODO: We actually don't need any type information to generate the clientset,
	// perhaps we can adapt the go2idl framework to this kind of usage.
sw := generator.NewSnippetWriter(w, c, "$", "$")
allGroups := clientgentypes.ToGroupVersionInfo(g.groups, g.groupGoNames)
sw.Do(common, nil)
sw.Do(checkImpl, nil)
for _, group := range allGroups {
m := map[string]interface{}{
"group": group.Group,
"version": group.Version,
"PackageAlias": group.PackageAlias,
"GroupGoName": group.GroupGoName,
"Version": namer.IC(group.Version.String()),
}
sw.Do(clientsetInterfaceImplTemplate, m)
}
return sw.Error()
}
// This part of the code is version-independent and does not change.
var common = `
// NewSimpleClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &Clientset{}
cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
cs.AddReactor("*", "*", testing.ObjectReaction(o))
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
gvr := action.GetResource()
ns := action.GetNamespace()
watch, err := o.Watch(gvr, ns)
if err != nil {
return false, nil, err
}
return true, watch, nil
})
return cs
}
// Clientset implements clientset.Interface. Meant to be embedded into a
// struct to get a default implementation. This makes faking out just the method
// you want to test easier.
type Clientset struct {
testing.Fake
discovery *fakediscovery.FakeDiscovery
}
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
return c.discovery
}
`
var checkImpl = `
var _ clientset.Interface = &Clientset{}
`
var clientsetInterfaceImplTemplate = `
// $.GroupGoName$$.Version$ retrieves the $.GroupGoName$$.Version$Client
func (c *Clientset) $.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.Version$Interface {
return &fake$.PackageAlias$.Fake$.GroupGoName$$.Version${Fake: &c.Fake}
}
`
var clientsetInterfaceDefaultVersionImpl = `
// $.GroupGoName$ retrieves the $.GroupGoName$$.Version$Client
func (c *Clientset) $.GroupGoName$() $.PackageAlias$.$.GroupGoName$$.Version$Interface {
return &fake$.PackageAlias$.Fake$.GroupGoName$$.Version${Fake: &c.Fake}
}
`
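The NewSimpleClientset constructor emitted by the common template above is what test code interacts with. A minimal usage sketch, assuming the standard Kubernetes clientset generated by this tool (k8s.io/client-go/kubernetes/fake):

package example

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestListPods(t *testing.T) {
	// Seed the object tracker with a single Pod.
	cs := fake.NewSimpleClientset(&corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
	})

	// The fake typed clients record actions on the embedded testing.Fake and
	// answer from the tracker, so no API server is needed.
	pods, err := cs.CoreV1().Pods("default").List(metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(pods.Items) != 1 {
		t.Fatalf("expected 1 pod, got %d", len(pods.Items))
	}
}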


@ -0,0 +1,130 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"fmt"
"io"
"path/filepath"
"strings"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/code-generator/cmd/client-gen/generators/util"
)
// genFakeForGroup produces a file for a group client, e.g. ExtensionsClient for the extension group.
type genFakeForGroup struct {
generator.DefaultGen
outputPackage string
realClientPackage string
group string
version string
groupGoName string
// types in this group
types []*types.Type
imports namer.ImportTracker
	// called records whether GenerateType has run; this generator should only execute once per group.
called bool
}
var _ generator.Generator = &genFakeForGroup{}
// We only want to call GenerateType() once per group.
func (g *genFakeForGroup) Filter(c *generator.Context, t *types.Type) bool {
if !g.called {
g.called = true
return true
}
return false
}
func (g *genFakeForGroup) Namers(c *generator.Context) namer.NameSystems {
return namer.NameSystems{
"raw": namer.NewRawNamer(g.outputPackage, g.imports),
}
}
func (g *genFakeForGroup) Imports(c *generator.Context) (imports []string) {
imports = g.imports.ImportLines()
if len(g.types) != 0 {
imports = append(imports, fmt.Sprintf("%s \"%s\"", strings.ToLower(filepath.Base(g.realClientPackage)), g.realClientPackage))
}
return imports
}
func (g *genFakeForGroup) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "$", "$")
m := map[string]interface{}{
"GroupGoName": g.groupGoName,
"Version": namer.IC(g.version),
"Fake": c.Universe.Type(types.Name{Package: "k8s.io/client-go/testing", Name: "Fake"}),
"RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}),
"RESTClient": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClient"}),
}
sw.Do(groupClientTemplate, m)
for _, t := range g.types {
tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...))
if err != nil {
return err
}
wrapper := map[string]interface{}{
"type": t,
"GroupGoName": g.groupGoName,
"Version": namer.IC(g.version),
"realClientPackage": strings.ToLower(filepath.Base(g.realClientPackage)),
}
if tags.NonNamespaced {
sw.Do(getterImplNonNamespaced, wrapper)
continue
}
sw.Do(getterImplNamespaced, wrapper)
}
sw.Do(getRESTClient, m)
return sw.Error()
}
var groupClientTemplate = `
type Fake$.GroupGoName$$.Version$ struct {
*$.Fake|raw$
}
`
var getterImplNamespaced = `
func (c *Fake$.GroupGoName$$.Version$) $.type|publicPlural$(namespace string) $.realClientPackage$.$.type|public$Interface {
return &Fake$.type|publicPlural${c, namespace}
}
`
var getterImplNonNamespaced = `
func (c *Fake$.GroupGoName$$.Version$) $.type|publicPlural$() $.realClientPackage$.$.type|public$Interface {
return &Fake$.type|publicPlural${c}
}
`
var getRESTClient = `
// RESTClient returns a RESTClient that is used to communicate
// with the API server by this client implementation.
func (c *Fake$.GroupGoName$$.Version$) RESTClient() $.RESTClientInterface|raw$ {
var ret *$.RESTClient|raw$
return ret
}
`
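For illustration, the templates above would render roughly as follows for a hypothetical group with Go name "Example", version v1, and a namespaced Widget type (the import path, WidgetInterface, and FakeWidgets are assumptions; the latter two come from the typed-client and per-type fake generators):

package fake

import (
	examplev1 "example.dev/pkg/client/clientset/versioned/typed/example/v1"
	rest "k8s.io/client-go/rest"
	testing "k8s.io/client-go/testing"
)

// FakeExampleV1 is the expansion of groupClientTemplate.
type FakeExampleV1 struct {
	*testing.Fake
}

// Widgets is the getter emitted by getterImplNamespaced.
func (c *FakeExampleV1) Widgets(namespace string) examplev1.WidgetInterface {
	return &FakeWidgets{c, namespace}
}

// RESTClient matches getRESTClient: the fake group client has no real REST
// client, so a typed nil is returned.
func (c *FakeExampleV1) RESTClient() rest.Interface {
	var ret *rest.RESTClient
	return ret
}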


@ -0,0 +1,479 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"io"
"path/filepath"
"strings"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/code-generator/cmd/client-gen/generators/util"
"k8s.io/code-generator/cmd/client-gen/path"
)
// genFakeForType produces a file for each top-level type.
type genFakeForType struct {
generator.DefaultGen
outputPackage string
group string
version string
groupGoName string
inputPackage string
typeToMatch *types.Type
imports namer.ImportTracker
}
var _ generator.Generator = &genFakeForType{}
// Filter ignores all but one type because we're making a single file per type.
func (g *genFakeForType) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch }
func (g *genFakeForType) Namers(c *generator.Context) namer.NameSystems {
return namer.NameSystems{
"raw": namer.NewRawNamer(g.outputPackage, g.imports),
}
}
func (g *genFakeForType) Imports(c *generator.Context) (imports []string) {
return g.imports.ImportLines()
}
// Ideally, we'd like genStatus to return true if there is a subresource path
// registered for "status" in the API server, but we do not have that
// information, so genStatus returns true if the type has a status field.
func genStatus(t *types.Type) bool {
// Default to true if we have a Status member
hasStatus := false
for _, m := range t.Members {
if m.Name == "Status" {
hasStatus = true
break
}
}
tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...))
return hasStatus && !tags.NoStatus
}
// hasObjectMeta returns true if the type has an ObjectMeta field.
func hasObjectMeta(t *types.Type) bool {
for _, m := range t.Members {
		if m.Embedded && m.Name == "ObjectMeta" {
return true
}
}
return false
}
// GenerateType makes the body of a file implementing the individual typed client for type t.
func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "$", "$")
pkg := filepath.Base(t.Name.Package)
tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...))
if err != nil {
return err
}
canonicalGroup := g.group
if canonicalGroup == "core" {
canonicalGroup = ""
}
groupName := g.group
if g.group == "core" {
groupName = ""
}
// allow user to define a group name that's different from the one parsed from the directory.
p := c.Universe.Package(path.Vendorless(g.inputPackage))
if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil {
groupName = override[0]
}
const pkgClientGoTesting = "k8s.io/client-go/testing"
m := map[string]interface{}{
"type": t,
"inputType": t,
"resultType": t,
"subresourcePath": "",
"package": pkg,
"Package": namer.IC(pkg),
"namespaced": !tags.NonNamespaced,
"Group": namer.IC(g.group),
"GroupGoName": g.groupGoName,
"Version": namer.IC(g.version),
"group": canonicalGroup,
"groupName": groupName,
"version": g.version,
"DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}),
"ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}),
"GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}),
"Everything": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/labels", Name: "Everything"}),
"GroupVersionResource": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionResource"}),
"GroupVersionKind": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionKind"}),
"PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}),
"watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}),
"NewRootListAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootListAction"}),
"NewListAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewListAction"}),
"NewRootGetAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootGetAction"}),
"NewGetAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewGetAction"}),
"NewRootDeleteAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteAction"}),
"NewDeleteAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteAction"}),
"NewRootDeleteCollectionAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootDeleteCollectionAction"}),
"NewDeleteCollectionAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewDeleteCollectionAction"}),
"NewRootUpdateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootUpdateAction"}),
"NewUpdateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewUpdateAction"}),
"NewRootCreateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootCreateAction"}),
"NewCreateAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewCreateAction"}),
"NewRootWatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootWatchAction"}),
"NewWatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewWatchAction"}),
"NewCreateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewCreateSubresourceAction"}),
"NewRootCreateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootCreateSubresourceAction"}),
"NewUpdateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewUpdateSubresourceAction"}),
"NewGetSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewGetSubresourceAction"}),
"NewRootGetSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootGetSubresourceAction"}),
"NewRootUpdateSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootUpdateSubresourceAction"}),
"NewRootPatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootPatchAction"}),
"NewPatchAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewPatchAction"}),
"NewRootPatchSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewRootPatchSubresourceAction"}),
"NewPatchSubresourceAction": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "NewPatchSubresourceAction"}),
"ExtractFromListOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "ExtractFromListOptions"}),
}
if tags.NonNamespaced {
sw.Do(structNonNamespaced, m)
} else {
sw.Do(structNamespaced, m)
}
if tags.NoVerbs {
return sw.Error()
}
sw.Do(resource, m)
sw.Do(kind, m)
if tags.HasVerb("get") {
sw.Do(getTemplate, m)
}
if tags.HasVerb("list") {
if hasObjectMeta(t) {
sw.Do(listUsingOptionsTemplate, m)
} else {
sw.Do(listTemplate, m)
}
}
if tags.HasVerb("watch") {
sw.Do(watchTemplate, m)
}
if tags.HasVerb("create") {
sw.Do(createTemplate, m)
}
if tags.HasVerb("update") {
sw.Do(updateTemplate, m)
}
if tags.HasVerb("updateStatus") && genStatus(t) {
sw.Do(updateStatusTemplate, m)
}
if tags.HasVerb("delete") {
sw.Do(deleteTemplate, m)
}
if tags.HasVerb("deleteCollection") {
sw.Do(deleteCollectionTemplate, m)
}
if tags.HasVerb("patch") {
sw.Do(patchTemplate, m)
}
// generate extended client methods
for _, e := range tags.Extensions {
inputType := *t
resultType := *t
if len(e.InputTypeOverride) > 0 {
if name, pkg := e.Input(); len(pkg) > 0 {
newType := c.Universe.Type(types.Name{Package: pkg, Name: name})
inputType = *newType
} else {
inputType.Name.Name = e.InputTypeOverride
}
}
if len(e.ResultTypeOverride) > 0 {
if name, pkg := e.Result(); len(pkg) > 0 {
newType := c.Universe.Type(types.Name{Package: pkg, Name: name})
resultType = *newType
} else {
resultType.Name.Name = e.ResultTypeOverride
}
}
m["inputType"] = &inputType
m["resultType"] = &resultType
m["subresourcePath"] = e.SubResourcePath
if e.HasVerb("get") {
if e.IsSubresource() {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, getSubresourceTemplate), m)
} else {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, getTemplate), m)
}
}
if e.HasVerb("list") {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, listTemplate), m)
}
		// TODO: Figure out the semantics for watching a sub-resource.
if e.HasVerb("watch") {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, watchTemplate), m)
}
if e.HasVerb("create") {
if e.IsSubresource() {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, createSubresourceTemplate), m)
} else {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, createTemplate), m)
}
}
if e.HasVerb("update") {
if e.IsSubresource() {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, updateSubresourceTemplate), m)
} else {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, updateTemplate), m)
}
}
		// TODO: Figure out the semantics for deleting a sub-resource (what arguments
		// are passed, does it need two names? etc.).
if e.HasVerb("delete") {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, deleteTemplate), m)
}
if e.HasVerb("patch") {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, patchTemplate), m)
}
}
return sw.Error()
}
// adjustTemplate adjusts the original verb template using the expansion name.
// TODO: Make the verbs in templates parametrized so the strings.Replace() is
// not needed.
func adjustTemplate(name, verbType, template string) string {
return strings.Replace(template, " "+strings.Title(verbType), " "+name, -1)
}
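// For illustration, with a hypothetical extension verb named "GetLatest" of
// verb type "get", adjustTemplate("GetLatest", "get", getTemplate) replaces
// every " Get" in the template, so both the doc comment and the method name
// in the emitted code read "GetLatest" instead of "Get".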
// template for the struct that implements the type's interface
var structNamespaced = `
// Fake$.type|publicPlural$ implements $.type|public$Interface
type Fake$.type|publicPlural$ struct {
Fake *Fake$.GroupGoName$$.Version$
ns string
}
`
// template for the struct that implements the type's interface
var structNonNamespaced = `
// Fake$.type|publicPlural$ implements $.type|public$Interface
type Fake$.type|publicPlural$ struct {
Fake *Fake$.GroupGoName$$.Version$
}
`
var resource = `
var $.type|allLowercasePlural$Resource = $.GroupVersionResource|raw${Group: "$.groupName$", Version: "$.version$", Resource: "$.type|resource$"}
`
var kind = `
var $.type|allLowercasePlural$Kind = $.GroupVersionKind|raw${Group: "$.groupName$", Version: "$.version$", Kind: "$.type|singularKind$"}
`
var listTemplate = `
// List takes label and field selectors, and returns the list of $.type|publicPlural$ that match those selectors.
func (c *Fake$.type|publicPlural$) List(opts $.ListOptions|raw$) (result *$.type|raw$List, err error) {
obj, err := c.Fake.
$if .namespaced$Invokes($.NewListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, c.ns, opts), &$.type|raw$List{})
$else$Invokes($.NewRootListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, opts), &$.type|raw$List{})$end$
if obj == nil {
return nil, err
}
return obj.(*$.type|raw$List), err
}
`
var listUsingOptionsTemplate = `
// List takes label and field selectors, and returns the list of $.type|publicPlural$ that match those selectors.
func (c *Fake$.type|publicPlural$) List(opts $.ListOptions|raw$) (result *$.type|raw$List, err error) {
obj, err := c.Fake.
$if .namespaced$Invokes($.NewListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, c.ns, opts), &$.type|raw$List{})
$else$Invokes($.NewRootListAction|raw$($.type|allLowercasePlural$Resource, $.type|allLowercasePlural$Kind, opts), &$.type|raw$List{})$end$
if obj == nil {
return nil, err
}
label, _, _ := $.ExtractFromListOptions|raw$(opts)
if label == nil {
label = $.Everything|raw$()
}
list := &$.type|raw$List{ListMeta: obj.(*$.type|raw$List).ListMeta}
for _, item := range obj.(*$.type|raw$List).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
`
var getTemplate = `
// Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any.
func (c *Fake$.type|publicPlural$) Get(name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) {
obj, err := c.Fake.
$if .namespaced$Invokes($.NewGetAction|raw$($.type|allLowercasePlural$Resource, c.ns, name), &$.resultType|raw${})
$else$Invokes($.NewRootGetAction|raw$($.type|allLowercasePlural$Resource, name), &$.resultType|raw${})$end$
if obj == nil {
return nil, err
}
return obj.(*$.resultType|raw$), err
}
`
var getSubresourceTemplate = `
// Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any.
func (c *Fake$.type|publicPlural$) Get($.type|private$Name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) {
obj, err := c.Fake.
$if .namespaced$Invokes($.NewGetSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, "$.subresourcePath$", $.type|private$Name), &$.resultType|raw${})
$else$Invokes($.NewRootGetSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.type|private$Name), &$.resultType|raw${})$end$
if obj == nil {
return nil, err
}
return obj.(*$.resultType|raw$), err
}
`
var deleteTemplate = `
// Delete takes name of the $.type|private$ and deletes it. Returns an error if one occurs.
func (c *Fake$.type|publicPlural$) Delete(name string, options *$.DeleteOptions|raw$) error {
_, err := c.Fake.
$if .namespaced$Invokes($.NewDeleteAction|raw$($.type|allLowercasePlural$Resource, c.ns, name), &$.type|raw${})
$else$Invokes($.NewRootDeleteAction|raw$($.type|allLowercasePlural$Resource, name), &$.type|raw${})$end$
return err
}
`
var deleteCollectionTemplate = `
// DeleteCollection deletes a collection of objects.
func (c *Fake$.type|publicPlural$) DeleteCollection(options *$.DeleteOptions|raw$, listOptions $.ListOptions|raw$) error {
$if .namespaced$action := $.NewDeleteCollectionAction|raw$($.type|allLowercasePlural$Resource, c.ns, listOptions)
$else$action := $.NewRootDeleteCollectionAction|raw$($.type|allLowercasePlural$Resource, listOptions)
$end$
_, err := c.Fake.Invokes(action, &$.type|raw$List{})
return err
}
`
var createTemplate = `
// Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any.
func (c *Fake$.type|publicPlural$) Create($.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) {
obj, err := c.Fake.
$if .namespaced$Invokes($.NewCreateAction|raw$($.inputType|allLowercasePlural$Resource, c.ns, $.inputType|private$), &$.resultType|raw${})
$else$Invokes($.NewRootCreateAction|raw$($.inputType|allLowercasePlural$Resource, $.inputType|private$), &$.resultType|raw${})$end$
if obj == nil {
return nil, err
}
return obj.(*$.resultType|raw$), err
}
`
var createSubresourceTemplate = `
// Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any.
func (c *Fake$.type|publicPlural$) Create($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) {
obj, err := c.Fake.
$if .namespaced$Invokes($.NewCreateSubresourceAction|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, "$.subresourcePath$", c.ns, $.inputType|private$), &$.resultType|raw${})
$else$Invokes($.NewRootCreateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.inputType|private$), &$.resultType|raw${})$end$
if obj == nil {
return nil, err
}
return obj.(*$.resultType|raw$), err
}
`
var updateTemplate = `
// Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any.
func (c *Fake$.type|publicPlural$) Update($.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) {
obj, err := c.Fake.
$if .namespaced$Invokes($.NewUpdateAction|raw$($.inputType|allLowercasePlural$Resource, c.ns, $.inputType|private$), &$.resultType|raw${})
$else$Invokes($.NewRootUpdateAction|raw$($.inputType|allLowercasePlural$Resource, $.inputType|private$), &$.resultType|raw${})$end$
if obj == nil {
return nil, err
}
return obj.(*$.resultType|raw$), err
}
`
var updateSubresourceTemplate = `
// Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any.
func (c *Fake$.type|publicPlural$) Update($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) {
obj, err := c.Fake.
$if .namespaced$Invokes($.NewUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", c.ns, $.inputType|private$), &$.inputType|raw${})
$else$Invokes($.NewRootUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "$.subresourcePath$", $.inputType|private$), &$.resultType|raw${})$end$
if obj == nil {
return nil, err
}
return obj.(*$.resultType|raw$), err
}
`
var updateStatusTemplate = `
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *Fake$.type|publicPlural$) UpdateStatus($.type|private$ *$.type|raw$) (*$.type|raw$, error) {
obj, err := c.Fake.
$if .namespaced$Invokes($.NewUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "status", c.ns, $.type|private$), &$.type|raw${})
$else$Invokes($.NewRootUpdateSubresourceAction|raw$($.type|allLowercasePlural$Resource, "status", $.type|private$), &$.type|raw${})$end$
if obj == nil {
return nil, err
}
return obj.(*$.type|raw$), err
}
`
var watchTemplate = `
// Watch returns a $.watchInterface|raw$ that watches the requested $.type|privatePlural$.
func (c *Fake$.type|publicPlural$) Watch(opts $.ListOptions|raw$) ($.watchInterface|raw$, error) {
return c.Fake.
$if .namespaced$InvokesWatch($.NewWatchAction|raw$($.type|allLowercasePlural$Resource, c.ns, opts))
$else$InvokesWatch($.NewRootWatchAction|raw$($.type|allLowercasePlural$Resource, opts))$end$
}
`
var patchTemplate = `
// Patch applies the patch and returns the patched $.resultType|private$.
func (c *Fake$.type|publicPlural$) Patch(name string, pt $.PatchType|raw$, data []byte, subresources ...string) (result *$.resultType|raw$, err error) {
obj, err := c.Fake.
$if .namespaced$Invokes($.NewPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, name, pt, data, subresources... ), &$.resultType|raw${})
$else$Invokes($.NewRootPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, name, pt, data, subresources...), &$.resultType|raw${})$end$
if obj == nil {
return nil, err
}
return obj.(*$.resultType|raw$), err
}
`
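Rendered for the same hypothetical Widget type (group example.dev, version v1; the examplev1 API package and FakeExampleV1 are assumptions), the struct, resource/kind, and get templates above expand to roughly:

package fake

import (
	examplev1 "example.dev/pkg/apis/example/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schema "k8s.io/apimachinery/pkg/runtime/schema"
	testing "k8s.io/client-go/testing"
)

// FakeWidgets implements WidgetInterface (structNamespaced).
type FakeWidgets struct {
	Fake *FakeExampleV1
	ns   string
}

var widgetsResource = schema.GroupVersionResource{Group: "example.dev", Version: "v1", Resource: "widgets"}

var widgetsKind = schema.GroupVersionKind{Group: "example.dev", Version: "v1", Kind: "Widget"}

// Get records a get action on the embedded testing.Fake and returns whatever
// the object tracker holds for that name (getTemplate, namespaced branch).
func (c *FakeWidgets) Get(name string, options metav1.GetOptions) (result *examplev1.Widget, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewGetAction(widgetsResource, c.ns, name), &examplev1.Widget{})
	if obj == nil {
		return nil, err
	}
	return obj.(*examplev1.Widget), err
}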


@ -0,0 +1,178 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"fmt"
"io"
"path/filepath"
"strings"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
)
// genClientset generates a package for a clientset.
type genClientset struct {
generator.DefaultGen
groups []clientgentypes.GroupVersions
groupGoNames map[clientgentypes.GroupVersion]string
clientsetPackage string
outputPackage string
imports namer.ImportTracker
clientsetGenerated bool
}
var _ generator.Generator = &genClientset{}
func (g *genClientset) Namers(c *generator.Context) namer.NameSystems {
return namer.NameSystems{
"raw": namer.NewRawNamer(g.outputPackage, g.imports),
}
}
// We only want to call GenerateType() once.
func (g *genClientset) Filter(c *generator.Context, t *types.Type) bool {
ret := !g.clientsetGenerated
g.clientsetGenerated = true
return ret
}
func (g *genClientset) Imports(c *generator.Context) (imports []string) {
imports = append(imports, g.imports.ImportLines()...)
for _, group := range g.groups {
for _, version := range group.Versions {
typedClientPath := filepath.Join(g.clientsetPackage, "typed", strings.ToLower(group.PackageName), strings.ToLower(version.NonEmpty()))
groupAlias := strings.ToLower(g.groupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}])
imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.NonEmpty()), typedClientPath))
}
}
return
}
func (g *genClientset) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
// TODO: We actually don't need any type information to generate the clientset,
	// perhaps we can adapt the go2idl framework to this kind of usage.
sw := generator.NewSnippetWriter(w, c, "$", "$")
allGroups := clientgentypes.ToGroupVersionInfo(g.groups, g.groupGoNames)
m := map[string]interface{}{
"allGroups": allGroups,
"Config": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}),
"DefaultKubernetesUserAgent": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "DefaultKubernetesUserAgent"}),
"RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}),
"DiscoveryInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/discovery", Name: "DiscoveryInterface"}),
"DiscoveryClient": c.Universe.Type(types.Name{Package: "k8s.io/client-go/discovery", Name: "DiscoveryClient"}),
"NewDiscoveryClientForConfig": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClientForConfig"}),
"NewDiscoveryClientForConfigOrDie": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClientForConfigOrDie"}),
"NewDiscoveryClient": c.Universe.Function(types.Name{Package: "k8s.io/client-go/discovery", Name: "NewDiscoveryClient"}),
"flowcontrolNewTokenBucketRateLimiter": c.Universe.Function(types.Name{Package: "k8s.io/client-go/util/flowcontrol", Name: "NewTokenBucketRateLimiter"}),
}
sw.Do(clientsetInterface, m)
sw.Do(clientsetTemplate, m)
for _, g := range allGroups {
sw.Do(clientsetInterfaceImplTemplate, g)
}
sw.Do(getDiscoveryTemplate, m)
sw.Do(newClientsetForConfigTemplate, m)
sw.Do(newClientsetForConfigOrDieTemplate, m)
sw.Do(newClientsetForRESTClientTemplate, m)
return sw.Error()
}
var clientsetInterface = `
type Interface interface {
Discovery() $.DiscoveryInterface|raw$
$range .allGroups$$.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.Version$Interface
$end$
}
`
var clientsetTemplate = `
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*$.DiscoveryClient|raw$
$range .allGroups$$.LowerCaseGroupGoName$$.Version$ *$.PackageAlias$.$.GroupGoName$$.Version$Client
$end$
}
`
var clientsetInterfaceImplTemplate = `
// $.GroupGoName$$.Version$ retrieves the $.GroupGoName$$.Version$Client
func (c *Clientset) $.GroupGoName$$.Version$() $.PackageAlias$.$.GroupGoName$$.Version$Interface {
return c.$.LowerCaseGroupGoName$$.Version$
}
`
var getDiscoveryTemplate = `
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() $.DiscoveryInterface|raw$ {
if c == nil {
return nil
}
return c.DiscoveryClient
}
`
var newClientsetForConfigTemplate = `
// NewForConfig creates a new Clientset for the given config.
func NewForConfig(c *$.Config|raw$) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
configShallowCopy.RateLimiter = $.flowcontrolNewTokenBucketRateLimiter|raw$(configShallowCopy.QPS, configShallowCopy.Burst)
}
var cs Clientset
var err error
$range .allGroups$ cs.$.LowerCaseGroupGoName$$.Version$, err =$.PackageAlias$.NewForConfig(&configShallowCopy)
if err!=nil {
return nil, err
}
$end$
cs.DiscoveryClient, err = $.NewDiscoveryClientForConfig|raw$(&configShallowCopy)
if err!=nil {
return nil, err
}
return &cs, nil
}
`
var newClientsetForConfigOrDieTemplate = `
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *$.Config|raw$) *Clientset {
var cs Clientset
$range .allGroups$ cs.$.LowerCaseGroupGoName$$.Version$ =$.PackageAlias$.NewForConfigOrDie(c)
$end$
cs.DiscoveryClient = $.NewDiscoveryClientForConfigOrDie|raw$(c)
return &cs
}
`
var newClientsetForRESTClientTemplate = `
// New creates a new Clientset for the given RESTClient.
func New(c $.RESTClientInterface|raw$) *Clientset {
var cs Clientset
$range .allGroups$ cs.$.LowerCaseGroupGoName$$.Version$ =$.PackageAlias$.New(c)
$end$
cs.DiscoveryClient = $.NewDiscoveryClient|raw$(c)
return &cs
}
`
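For a project with a single hypothetical group example.dev/v1, the clientset templates above produce roughly the following (the example.dev import paths and ExampleV1Client are assumptions):

package versioned

import (
	examplev1 "example.dev/pkg/client/clientset/versioned/typed/example/v1"
	discovery "k8s.io/client-go/discovery"
	rest "k8s.io/client-go/rest"
	flowcontrol "k8s.io/client-go/util/flowcontrol"
)

// Clientset holds one client per group/version plus discovery (clientsetTemplate).
type Clientset struct {
	*discovery.DiscoveryClient
	exampleV1 *examplev1.ExampleV1Client
}

// ExampleV1 retrieves the ExampleV1Client (clientsetInterfaceImplTemplate).
func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface {
	return c.exampleV1
}

// NewForConfig builds every group client and the discovery client from a
// shallow copy of the config (newClientsetForConfigTemplate).
func NewForConfig(c *rest.Config) (*Clientset, error) {
	configShallowCopy := *c
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}
	var cs Clientset
	var err error
	cs.exampleV1, err = examplev1.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	return &cs, nil
}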


@ -0,0 +1,54 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"io"
"os"
"path/filepath"
"strings"
"k8s.io/gengo/generator"
"k8s.io/gengo/types"
)
// genExpansion produces a file with the expansion interfaces for the types in a group.
type genExpansion struct {
generator.DefaultGen
groupPackagePath string
// types in a group
types []*types.Type
}
// We only want to call GenerateType() once per group.
func (g *genExpansion) Filter(c *generator.Context, t *types.Type) bool {
return len(g.types) == 0 || t == g.types[0]
}
func (g *genExpansion) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "$", "$")
for _, t := range g.types {
if _, err := os.Stat(filepath.Join(g.groupPackagePath, strings.ToLower(t.Name.Name+"_expansion.go"))); os.IsNotExist(err) {
sw.Do(expansionInterfaceTemplate, t)
}
}
return sw.Error()
}
var expansionInterfaceTemplate = `
type $.|public$Expansion interface {}
`
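genExpansion skips any type whose package already contains a hand-written <type>_expansion.go. As a rough sketch (Widget, the "pause" subresource, and the widgets struct from the typed-client generator are all hypothetical), such a file lets users graft extra methods onto the generated interface:

package v1

// WidgetExpansion is picked up instead of the empty generated interface and
// is embedded into WidgetInterface via the $.type|public$Expansion reference.
type WidgetExpansion interface {
	Pause(name string) error
}

// Pause is an illustrative hand-written helper built on the generated client.
func (c *widgets) Pause(name string) error {
	return c.client.Put().
		Namespace(c.ns).
		Resource("widgets").
		Name(name).
		SubResource("pause").
		Do().
		Error()
}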


@ -0,0 +1,247 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"io"
"path/filepath"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/code-generator/cmd/client-gen/generators/util"
"k8s.io/code-generator/cmd/client-gen/path"
)
// genGroup produces a file for a group client, e.g. ExtensionsClient for the extension group.
type genGroup struct {
generator.DefaultGen
outputPackage string
group string
version string
groupGoName string
apiPath string
// types in this group
types []*types.Type
imports namer.ImportTracker
inputPackage string
clientsetPackage string
	// called records whether GenerateType has run; this generator should only execute once per group.
called bool
}
var _ generator.Generator = &genGroup{}
// We only want to call GenerateType() once per group.
func (g *genGroup) Filter(c *generator.Context, t *types.Type) bool {
if !g.called {
g.called = true
return true
}
return false
}
func (g *genGroup) Namers(c *generator.Context) namer.NameSystems {
return namer.NameSystems{
"raw": namer.NewRawNamer(g.outputPackage, g.imports),
}
}
func (g *genGroup) Imports(c *generator.Context) (imports []string) {
imports = append(imports, g.imports.ImportLines()...)
imports = append(imports, filepath.Join(g.clientsetPackage, "scheme"))
return
}
func (g *genGroup) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "$", "$")
apiPath := func(group string) string {
if group == "core" {
return `"/api"`
}
return `"` + g.apiPath + `"`
}
groupName := g.group
if g.group == "core" {
groupName = ""
}
// allow user to define a group name that's different from the one parsed from the directory.
p := c.Universe.Package(path.Vendorless(g.inputPackage))
if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil {
groupName = override[0]
}
m := map[string]interface{}{
"group": g.group,
"version": g.version,
"groupName": groupName,
"GroupGoName": g.groupGoName,
"Version": namer.IC(g.version),
"types": g.types,
"apiPath": apiPath(g.group),
"schemaGroupVersion": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersion"}),
"runtimeAPIVersionInternal": c.Universe.Variable(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "APIVersionInternal"}),
"serializerDirectCodecFactory": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/serializer", Name: "DirectCodecFactory"}),
"restConfig": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Config"}),
"restDefaultKubernetesUserAgent": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "DefaultKubernetesUserAgent"}),
"restRESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}),
"restRESTClientFor": c.Universe.Function(types.Name{Package: "k8s.io/client-go/rest", Name: "RESTClientFor"}),
"SchemeGroupVersion": c.Universe.Variable(types.Name{Package: path.Vendorless(g.inputPackage), Name: "SchemeGroupVersion"}),
}
sw.Do(groupInterfaceTemplate, m)
sw.Do(groupClientTemplate, m)
for _, t := range g.types {
tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...))
if err != nil {
return err
}
wrapper := map[string]interface{}{
"type": t,
"GroupGoName": g.groupGoName,
"Version": namer.IC(g.version),
}
if tags.NonNamespaced {
sw.Do(getterImplNonNamespaced, wrapper)
} else {
sw.Do(getterImplNamespaced, wrapper)
}
}
sw.Do(newClientForConfigTemplate, m)
sw.Do(newClientForConfigOrDieTemplate, m)
sw.Do(newClientForRESTClientTemplate, m)
if g.version == "" {
sw.Do(setInternalVersionClientDefaultsTemplate, m)
} else {
sw.Do(setClientDefaultsTemplate, m)
}
sw.Do(getRESTClient, m)
return sw.Error()
}
var groupInterfaceTemplate = `
type $.GroupGoName$$.Version$Interface interface {
RESTClient() $.restRESTClientInterface|raw$
$range .types$ $.|publicPlural$Getter
$end$
}
`
var groupClientTemplate = `
// $.GroupGoName$$.Version$Client is used to interact with features provided by the $.groupName$ group.
type $.GroupGoName$$.Version$Client struct {
restClient $.restRESTClientInterface|raw$
}
`
var getterImplNamespaced = `
func (c *$.GroupGoName$$.Version$Client) $.type|publicPlural$(namespace string) $.type|public$Interface {
return new$.type|publicPlural$(c, namespace)
}
`
var getterImplNonNamespaced = `
func (c *$.GroupGoName$$.Version$Client) $.type|publicPlural$() $.type|public$Interface {
return new$.type|publicPlural$(c)
}
`
var newClientForConfigTemplate = `
// NewForConfig creates a new $.GroupGoName$$.Version$Client for the given config.
func NewForConfig(c *$.restConfig|raw$) (*$.GroupGoName$$.Version$Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := $.restRESTClientFor|raw$(&config)
if err != nil {
return nil, err
}
return &$.GroupGoName$$.Version$Client{client}, nil
}
`
var newClientForConfigOrDieTemplate = `
// NewForConfigOrDie creates a new $.GroupGoName$$.Version$Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *$.restConfig|raw$) *$.GroupGoName$$.Version$Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
`
var getRESTClient = `
// RESTClient returns a RESTClient that is used to communicate
// with the API server by this client implementation.
func (c *$.GroupGoName$$.Version$Client) RESTClient() $.restRESTClientInterface|raw$ {
if c == nil {
return nil
}
return c.restClient
}
`
var newClientForRESTClientTemplate = `
// New creates a new $.GroupGoName$$.Version$Client for the given RESTClient.
func New(c $.restRESTClientInterface|raw$) *$.GroupGoName$$.Version$Client {
return &$.GroupGoName$$.Version$Client{c}
}
`
var setInternalVersionClientDefaultsTemplate = `
func setConfigDefaults(config *$.restConfig|raw$) error {
config.APIPath = $.apiPath$
if config.UserAgent == "" {
config.UserAgent = $.restDefaultKubernetesUserAgent|raw$()
}
if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("$.groupName$")[0].Group {
gv := scheme.Scheme.PrioritizedVersionsForGroup("$.groupName$")[0]
config.GroupVersion = &gv
}
config.NegotiatedSerializer = scheme.Codecs
if config.QPS == 0 {
config.QPS = 5
}
if config.Burst == 0 {
config.Burst = 10
}
return nil
}
`
var setClientDefaultsTemplate = `
func setConfigDefaults(config *$.restConfig|raw$) error {
gv := $.SchemeGroupVersion|raw$
config.GroupVersion = &gv
config.APIPath = $.apiPath$
config.NegotiatedSerializer = $.serializerDirectCodecFactory|raw${CodecFactory: scheme.Codecs}
if config.UserAgent == "" {
config.UserAgent = $.restDefaultKubernetesUserAgent|raw$()
}
return nil
}
`
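Taken together, the group client templates above render roughly as follows for the hypothetical example.dev/v1 group (import paths, ExampleV1Client, WidgetInterface, and newWidgets are assumptions; the versioned setConfigDefaults branch is shown, assuming the default "/apis" path):

package v1

import (
	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
	rest "k8s.io/client-go/rest"

	examplev1 "example.dev/pkg/apis/example/v1"
	"example.dev/pkg/client/clientset/versioned/scheme"
)

// ExampleV1Client is used to interact with features provided by the
// example.dev group (groupClientTemplate).
type ExampleV1Client struct {
	restClient rest.Interface
}

// Widgets is the namespaced getter (getterImplNamespaced); WidgetInterface
// and newWidgets come from the per-type generator.
func (c *ExampleV1Client) Widgets(namespace string) WidgetInterface {
	return newWidgets(c, namespace)
}

// NewForConfig corresponds to newClientForConfigTemplate.
func NewForConfig(c *rest.Config) (*ExampleV1Client, error) {
	config := *c
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	client, err := rest.RESTClientFor(&config)
	if err != nil {
		return nil, err
	}
	return &ExampleV1Client{client}, nil
}

// setConfigDefaults corresponds to setClientDefaultsTemplate.
func setConfigDefaults(config *rest.Config) error {
	gv := examplev1.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/apis"
	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	return nil
}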


@ -0,0 +1,599 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"io"
"path/filepath"
"strings"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/code-generator/cmd/client-gen/generators/util"
)
// genClientForType produces a file for each top-level type.
type genClientForType struct {
generator.DefaultGen
outputPackage string
clientsetPackage string
group string
version string
groupGoName string
typeToMatch *types.Type
imports namer.ImportTracker
}
var _ generator.Generator = &genClientForType{}
// Filter ignores all but one type because we're making a single file per type.
func (g *genClientForType) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch }
func (g *genClientForType) Namers(c *generator.Context) namer.NameSystems {
return namer.NameSystems{
"raw": namer.NewRawNamer(g.outputPackage, g.imports),
}
}
func (g *genClientForType) Imports(c *generator.Context) (imports []string) {
return g.imports.ImportLines()
}
// Ideally, we'd like genStatus to return true if there is a subresource path
// registered for "status" in the API server, but we do not have that
// information, so genStatus returns true if the type has a status field.
func genStatus(t *types.Type) bool {
// Default to true if we have a Status member
hasStatus := false
for _, m := range t.Members {
if m.Name == "Status" {
hasStatus = true
break
}
}
return hasStatus && !util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).NoStatus
}
// GenerateType makes the body of a file implementing the individual typed client for type t.
func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "$", "$")
pkg := filepath.Base(t.Name.Package)
tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...))
if err != nil {
return err
}
type extendedInterfaceMethod struct {
template string
args map[string]interface{}
}
extendedMethods := []extendedInterfaceMethod{}
for _, e := range tags.Extensions {
inputType := *t
resultType := *t
// TODO: Extract this to some helper method as this code is copied into
// 2 other places.
if len(e.InputTypeOverride) > 0 {
if name, pkg := e.Input(); len(pkg) > 0 {
newType := c.Universe.Type(types.Name{Package: pkg, Name: name})
inputType = *newType
} else {
inputType.Name.Name = e.InputTypeOverride
}
}
if len(e.ResultTypeOverride) > 0 {
if name, pkg := e.Result(); len(pkg) > 0 {
newType := c.Universe.Type(types.Name{Package: pkg, Name: name})
resultType = *newType
} else {
resultType.Name.Name = e.ResultTypeOverride
}
}
var updatedVerbtemplate string
if _, exists := subresourceDefaultVerbTemplates[e.VerbType]; e.IsSubresource() && exists {
updatedVerbtemplate = e.VerbName + "(" + strings.TrimPrefix(subresourceDefaultVerbTemplates[e.VerbType], strings.Title(e.VerbType)+"(")
} else {
updatedVerbtemplate = e.VerbName + "(" + strings.TrimPrefix(defaultVerbTemplates[e.VerbType], strings.Title(e.VerbType)+"(")
}
extendedMethods = append(extendedMethods, extendedInterfaceMethod{
template: updatedVerbtemplate,
args: map[string]interface{}{
"type": t,
"inputType": &inputType,
"resultType": &resultType,
"DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}),
"ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}),
"GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}),
"PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}),
},
})
}
m := map[string]interface{}{
"type": t,
"inputType": t,
"resultType": t,
"package": pkg,
"Package": namer.IC(pkg),
"namespaced": !tags.NonNamespaced,
"Group": namer.IC(g.group),
"subresource": false,
"subresourcePath": "",
"GroupGoName": g.groupGoName,
"Version": namer.IC(g.version),
"DeleteOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "DeleteOptions"}),
"ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}),
"GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}),
"PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}),
"watchInterface": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}),
"RESTClientInterface": c.Universe.Type(types.Name{Package: "k8s.io/client-go/rest", Name: "Interface"}),
"schemeParameterCodec": c.Universe.Variable(types.Name{Package: filepath.Join(g.clientsetPackage, "scheme"), Name: "ParameterCodec"}),
}
sw.Do(getterComment, m)
if tags.NonNamespaced {
sw.Do(getterNonNamespaced, m)
} else {
sw.Do(getterNamespaced, m)
}
sw.Do(interfaceTemplate1, m)
if !tags.NoVerbs {
if !genStatus(t) {
tags.SkipVerbs = append(tags.SkipVerbs, "updateStatus")
}
interfaceSuffix := ""
if len(extendedMethods) > 0 {
interfaceSuffix = "\n"
}
sw.Do("\n"+generateInterface(tags)+interfaceSuffix, m)
// add extended verbs into interface
for _, v := range extendedMethods {
sw.Do(v.template+interfaceSuffix, v.args)
}
}
sw.Do(interfaceTemplate4, m)
if tags.NonNamespaced {
sw.Do(structNonNamespaced, m)
sw.Do(newStructNonNamespaced, m)
} else {
sw.Do(structNamespaced, m)
sw.Do(newStructNamespaced, m)
}
if tags.NoVerbs {
return sw.Error()
}
if tags.HasVerb("get") {
sw.Do(getTemplate, m)
}
if tags.HasVerb("list") {
sw.Do(listTemplate, m)
}
if tags.HasVerb("watch") {
sw.Do(watchTemplate, m)
}
if tags.HasVerb("create") {
sw.Do(createTemplate, m)
}
if tags.HasVerb("update") {
sw.Do(updateTemplate, m)
}
if tags.HasVerb("updateStatus") {
sw.Do(updateStatusTemplate, m)
}
if tags.HasVerb("delete") {
sw.Do(deleteTemplate, m)
}
if tags.HasVerb("deleteCollection") {
sw.Do(deleteCollectionTemplate, m)
}
if tags.HasVerb("patch") {
sw.Do(patchTemplate, m)
}
// generate expansion methods
for _, e := range tags.Extensions {
inputType := *t
resultType := *t
if len(e.InputTypeOverride) > 0 {
if name, pkg := e.Input(); len(pkg) > 0 {
newType := c.Universe.Type(types.Name{Package: pkg, Name: name})
inputType = *newType
} else {
inputType.Name.Name = e.InputTypeOverride
}
}
if len(e.ResultTypeOverride) > 0 {
if name, pkg := e.Result(); len(pkg) > 0 {
newType := c.Universe.Type(types.Name{Package: pkg, Name: name})
resultType = *newType
} else {
resultType.Name.Name = e.ResultTypeOverride
}
}
m["inputType"] = &inputType
m["resultType"] = &resultType
m["subresourcePath"] = e.SubResourcePath
if e.HasVerb("get") {
if e.IsSubresource() {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, getSubresourceTemplate), m)
} else {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, getTemplate), m)
}
}
if e.HasVerb("list") {
if e.IsSubresource() {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, listSubresourceTemplate), m)
} else {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, listTemplate), m)
}
}
		// TODO: Figure out the semantics for watching a sub-resource.
if e.HasVerb("watch") {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, watchTemplate), m)
}
if e.HasVerb("create") {
if e.IsSubresource() {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, createSubresourceTemplate), m)
} else {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, createTemplate), m)
}
}
if e.HasVerb("update") {
if e.IsSubresource() {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, updateSubresourceTemplate), m)
} else {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, updateTemplate), m)
}
}
		// TODO: Figure out the semantics for deleting a sub-resource (what arguments
		// are passed, does it need two names? etc.).
if e.HasVerb("delete") {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, deleteTemplate), m)
}
if e.HasVerb("patch") {
sw.Do(adjustTemplate(e.VerbName, e.VerbType, patchTemplate), m)
}
}
return sw.Error()
}
// adjustTemplate adjusts the original verb template using the expansion name.
// TODO: Make the verbs in templates parametrized so the strings.Replace() is
// not needed.
func adjustTemplate(name, verbType, template string) string {
return strings.Replace(template, " "+strings.Title(verbType), " "+name, -1)
}
func generateInterface(tags util.Tags) string {
// need an ordered list here to guarantee order of generated methods.
out := []string{}
for _, m := range util.SupportedVerbs {
if tags.HasVerb(m) {
out = append(out, defaultVerbTemplates[m])
}
}
return strings.Join(out, "\n")
}
var subresourceDefaultVerbTemplates = map[string]string{
"create": `Create($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (*$.resultType|raw$, error)`,
"list": `List($.type|private$Name string, opts $.ListOptions|raw$) (*$.resultType|raw$List, error)`,
"update": `Update($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (*$.resultType|raw$, error)`,
"get": `Get($.type|private$Name string, options $.GetOptions|raw$) (*$.resultType|raw$, error)`,
}
var defaultVerbTemplates = map[string]string{
"create": `Create(*$.inputType|raw$) (*$.resultType|raw$, error)`,
"update": `Update(*$.inputType|raw$) (*$.resultType|raw$, error)`,
"updateStatus": `UpdateStatus(*$.type|raw$) (*$.type|raw$, error)`,
"delete": `Delete(name string, options *$.DeleteOptions|raw$) error`,
"deleteCollection": `DeleteCollection(options *$.DeleteOptions|raw$, listOptions $.ListOptions|raw$) error`,
"get": `Get(name string, options $.GetOptions|raw$) (*$.resultType|raw$, error)`,
"list": `List(opts $.ListOptions|raw$) (*$.resultType|raw$List, error)`,
"watch": `Watch(opts $.ListOptions|raw$) ($.watchInterface|raw$, error)`,
"patch": `Patch(name string, pt $.PatchType|raw$, data []byte, subresources ...string) (result *$.resultType|raw$, err error)`,
}
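// For a hypothetical namespaced Widget type with the full default verb set,
// generateInterface combined with interfaceTemplate1/interfaceTemplate4
// emits roughly (Widget, the v1/metav1 aliases, and WidgetExpansion are
// assumptions; the order follows util.SupportedVerbs):
//
//	type WidgetInterface interface {
//		Create(*v1.Widget) (*v1.Widget, error)
//		Update(*v1.Widget) (*v1.Widget, error)
//		UpdateStatus(*v1.Widget) (*v1.Widget, error)
//		Delete(name string, options *metav1.DeleteOptions) error
//		DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
//		Get(name string, options metav1.GetOptions) (*v1.Widget, error)
//		List(opts metav1.ListOptions) (*v1.WidgetList, error)
//		Watch(opts metav1.ListOptions) (watch.Interface, error)
//		Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Widget, err error)
//		WidgetExpansion
//	}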
// The group client will implement this interface.
var getterComment = `
// $.type|publicPlural$Getter has a method to return a $.type|public$Interface.
// A group's client should implement this interface.`
var getterNamespaced = `
type $.type|publicPlural$Getter interface {
$.type|publicPlural$(namespace string) $.type|public$Interface
}
`
var getterNonNamespaced = `
type $.type|publicPlural$Getter interface {
$.type|publicPlural$() $.type|public$Interface
}
`
// This type's interface; the typed client will implement this interface.
var interfaceTemplate1 = `
// $.type|public$Interface has methods to work with $.type|public$ resources.
type $.type|public$Interface interface {`
var interfaceTemplate4 = `
$.type|public$Expansion
}
`
// template for the struct that implements the type's interface
var structNamespaced = `
// $.type|privatePlural$ implements $.type|public$Interface
type $.type|privatePlural$ struct {
client $.RESTClientInterface|raw$
ns string
}
`
// template for the struct that implements the type's interface
var structNonNamespaced = `
// $.type|privatePlural$ implements $.type|public$Interface
type $.type|privatePlural$ struct {
client $.RESTClientInterface|raw$
}
`
var newStructNamespaced = `
// new$.type|publicPlural$ returns a $.type|publicPlural$
func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client, namespace string) *$.type|privatePlural$ {
return &$.type|privatePlural${
client: c.RESTClient(),
ns: namespace,
}
}
`
var newStructNonNamespaced = `
// new$.type|publicPlural$ returns a $.type|publicPlural$
func new$.type|publicPlural$(c *$.GroupGoName$$.Version$Client) *$.type|privatePlural$ {
return &$.type|privatePlural${
client: c.RESTClient(),
}
}
`
var listTemplate = `
// List takes label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors.
func (c *$.type|privatePlural$) List(opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil{
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &$.resultType|raw$List{}
err = c.client.Get().
$if .namespaced$Namespace(c.ns).$end$
Resource("$.type|resource$").
VersionedParams(&opts, $.schemeParameterCodec|raw$).
Timeout(timeout).
Do().
Into(result)
return
}
`
var listSubresourceTemplate = `
// List takes $.type|raw$ name, label and field selectors, and returns the list of $.resultType|publicPlural$ that match those selectors.
func (c *$.type|privatePlural$) List($.type|private$Name string, opts $.ListOptions|raw$) (result *$.resultType|raw$List, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil{
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &$.resultType|raw$List{}
err = c.client.Get().
$if .namespaced$Namespace(c.ns).$end$
Resource("$.type|resource$").
Name($.type|private$Name).
SubResource("$.subresourcePath$").
VersionedParams(&opts, $.schemeParameterCodec|raw$).
Timeout(timeout).
Do().
Into(result)
return
}
`
var getTemplate = `
// Get takes name of the $.type|private$, and returns the corresponding $.resultType|private$ object, and an error if there is any.
func (c *$.type|privatePlural$) Get(name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) {
result = &$.resultType|raw${}
err = c.client.Get().
$if .namespaced$Namespace(c.ns).$end$
Resource("$.type|resource$").
Name(name).
VersionedParams(&options, $.schemeParameterCodec|raw$).
Do().
Into(result)
return
}
`
var getSubresourceTemplate = `
// Get takes name of the $.type|private$, and returns the corresponding $.resultType|raw$ object, and an error if there is any.
func (c *$.type|privatePlural$) Get($.type|private$Name string, options $.GetOptions|raw$) (result *$.resultType|raw$, err error) {
result = &$.resultType|raw${}
err = c.client.Get().
$if .namespaced$Namespace(c.ns).$end$
Resource("$.type|resource$").
Name($.type|private$Name).
SubResource("$.subresourcePath$").
VersionedParams(&options, $.schemeParameterCodec|raw$).
Do().
Into(result)
return
}
`
var deleteTemplate = `
// Delete takes name of the $.type|private$ and deletes it. Returns an error if one occurs.
func (c *$.type|privatePlural$) Delete(name string, options *$.DeleteOptions|raw$) error {
return c.client.Delete().
$if .namespaced$Namespace(c.ns).$end$
Resource("$.type|resource$").
Name(name).
Body(options).
Do().
Error()
}
`
var deleteCollectionTemplate = `
// DeleteCollection deletes a collection of objects.
func (c *$.type|privatePlural$) DeleteCollection(options *$.DeleteOptions|raw$, listOptions $.ListOptions|raw$) error {
var timeout time.Duration
if listOptions.TimeoutSeconds != nil{
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
}
return c.client.Delete().
$if .namespaced$Namespace(c.ns).$end$
Resource("$.type|resource$").
VersionedParams(&listOptions, $.schemeParameterCodec|raw$).
Timeout(timeout).
Body(options).
Do().
Error()
}
`
var createSubresourceTemplate = `
// Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any.
func (c *$.type|privatePlural$) Create($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) {
result = &$.resultType|raw${}
err = c.client.Post().
$if .namespaced$Namespace(c.ns).$end$
Resource("$.type|resource$").
Name($.type|private$Name).
SubResource("$.subresourcePath$").
Body($.inputType|private$).
Do().
Into(result)
return
}
`
var createTemplate = `
// Create takes the representation of a $.inputType|private$ and creates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any.
func (c *$.type|privatePlural$) Create($.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) {
result = &$.resultType|raw${}
err = c.client.Post().
$if .namespaced$Namespace(c.ns).$end$
Resource("$.type|resource$").
Body($.inputType|private$).
Do().
Into(result)
return
}
`
var updateSubresourceTemplate = `
// Update takes the top resource name and the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any.
func (c *$.type|privatePlural$) Update($.type|private$Name string, $.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) {
result = &$.resultType|raw${}
err = c.client.Put().
$if .namespaced$Namespace(c.ns).$end$
Resource("$.type|resource$").
Name($.type|private$Name).
SubResource("$.subresourcePath$").
Body($.inputType|private$).
Do().
Into(result)
return
}
`
var updateTemplate = `
// Update takes the representation of a $.inputType|private$ and updates it. Returns the server's representation of the $.resultType|private$, and an error, if there is any.
func (c *$.type|privatePlural$) Update($.inputType|private$ *$.inputType|raw$) (result *$.resultType|raw$, err error) {
result = &$.resultType|raw${}
err = c.client.Put().
$if .namespaced$Namespace(c.ns).$end$
Resource("$.type|resource$").
Name($.inputType|private$.Name).
Body($.inputType|private$).
Do().
Into(result)
return
}
`
var updateStatusTemplate = `
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *$.type|privatePlural$) UpdateStatus($.type|private$ *$.type|raw$) (result *$.type|raw$, err error) {
result = &$.type|raw${}
err = c.client.Put().
$if .namespaced$Namespace(c.ns).$end$
Resource("$.type|resource$").
Name($.type|private$.Name).
SubResource("status").
Body($.type|private$).
Do().
Into(result)
return
}
`
var watchTemplate = `
// Watch returns a $.watchInterface|raw$ that watches the requested $.type|privatePlural$.
func (c *$.type|privatePlural$) Watch(opts $.ListOptions|raw$) ($.watchInterface|raw$, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil{
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
$if .namespaced$Namespace(c.ns).$end$
Resource("$.type|resource$").
VersionedParams(&opts, $.schemeParameterCodec|raw$).
Timeout(timeout).
Watch()
}
`
var patchTemplate = `
// Patch applies the patch and returns the patched $.resultType|private$.
func (c *$.type|privatePlural$) Patch(name string, pt $.PatchType|raw$, data []byte, subresources ...string) (result *$.resultType|raw$, err error) {
result = &$.resultType|raw${}
err = c.client.Patch(pt).
$if .namespaced$Namespace(c.ns).$end$
Resource("$.type|resource$").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}
`


@ -0,0 +1,186 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheme
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"k8s.io/code-generator/cmd/client-gen/path"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
)
// GenScheme produces a package for a clientset with the scheme, codecs and parameter codecs.
type GenScheme struct {
generator.DefaultGen
OutputPackage string
Groups []clientgentypes.GroupVersions
GroupGoNames map[clientgentypes.GroupVersion]string
InputPackages map[clientgentypes.GroupVersion]string
OutputPath string
ImportTracker namer.ImportTracker
PrivateScheme bool
CreateRegistry bool
schemeGenerated bool
}
func (g *GenScheme) Namers(c *generator.Context) namer.NameSystems {
return namer.NameSystems{
"raw": namer.NewRawNamer(g.OutputPackage, g.ImportTracker),
}
}
// We only want to call GenerateType() once.
func (g *GenScheme) Filter(c *generator.Context, t *types.Type) bool {
ret := !g.schemeGenerated
g.schemeGenerated = true
return ret
}
func (g *GenScheme) Imports(c *generator.Context) (imports []string) {
imports = append(imports, g.ImportTracker.ImportLines()...)
for _, group := range g.Groups {
for _, version := range group.Versions {
packagePath := g.InputPackages[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}]
groupAlias := strings.ToLower(g.GroupGoNames[clientgentypes.GroupVersion{Group: group.Group, Version: version.Version}])
if g.CreateRegistry {
// import the install package for internal clientsets instead of the type package with register.go
if version.Version != "" {
packagePath = filepath.Dir(packagePath)
}
packagePath = filepath.Join(packagePath, "install")
imports = append(imports, fmt.Sprintf("%s \"%s\"", groupAlias, path.Vendorless(packagePath)))
break
} else {
imports = append(imports, fmt.Sprintf("%s%s \"%s\"", groupAlias, strings.ToLower(version.Version.NonEmpty()), path.Vendorless(packagePath)))
}
}
}
return
}
func (g *GenScheme) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "$", "$")
allGroupVersions := clientgentypes.ToGroupVersionInfo(g.Groups, g.GroupGoNames)
allInstallGroups := clientgentypes.ToGroupInstallPackages(g.Groups, g.GroupGoNames)
m := map[string]interface{}{
"allGroupVersions": allGroupVersions,
"allInstallGroups": allInstallGroups,
"customRegister": false,
"runtimeNewParameterCodec": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "NewParameterCodec"}),
"runtimeNewScheme": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "NewScheme"}),
"serializerNewCodecFactory": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/serializer", Name: "NewCodecFactory"}),
"runtimeScheme": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "Scheme"}),
"runtimeSchemeBuilder": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "SchemeBuilder"}),
"runtimeUtilMust": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/util/runtime", Name: "Must"}),
"schemaGroupVersion": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersion"}),
"metav1AddToGroupVersion": c.Universe.Function(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "AddToGroupVersion"}),
}
globals := map[string]string{
"Scheme": "Scheme",
"Codecs": "Codecs",
"ParameterCodec": "ParameterCodec",
"Registry": "Registry",
}
for k, v := range globals {
if g.PrivateScheme {
m[k] = strings.ToLower(v[0:1]) + v[1:]
} else {
m[k] = v
}
}
sw.Do(globalsTemplate, m)
if g.OutputPath != "" {
if _, err := os.Stat(filepath.Join(g.OutputPath, strings.ToLower("register_custom.go"))); err == nil {
m["customRegister"] = true
}
}
if g.CreateRegistry {
sw.Do(registryRegistration, m)
} else {
sw.Do(simpleRegistration, m)
}
return sw.Error()
}
var globalsTemplate = `
var $.Scheme$ = $.runtimeNewScheme|raw$()
var $.Codecs$ = $.serializerNewCodecFactory|raw$($.Scheme$)
var $.ParameterCodec$ = $.runtimeNewParameterCodec|raw$($.Scheme$)`
var registryRegistration = `
func init() {
$.metav1AddToGroupVersion|raw$($.Scheme$, $.schemaGroupVersion|raw${Version: "v1"})
Install($.Scheme$)
}
// Install registers the API group and adds types to a scheme
func Install(scheme *$.runtimeScheme|raw$) {
$- range .allInstallGroups$
$.InstallPackageAlias$.Install(scheme)
$- end$
$if .customRegister$
ExtraInstall(scheme)
$end -$
}
`
var simpleRegistration = `
var localSchemeBuilder = $.runtimeSchemeBuilder|raw${
$- range .allGroupVersions$
$.PackageAlias$.AddToScheme,
$- end$
$if .customRegister$
ExtraAddToScheme,
$end -$
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme
func init() {
$.metav1AddToGroupVersion|raw$($.Scheme$, $.schemaGroupVersion|raw${Version: "v1"})
$.runtimeUtilMust|raw$(AddToScheme($.Scheme$))
}
`


@ -0,0 +1,341 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"errors"
"fmt"
"strings"
"k8s.io/gengo/types"
)
var supportedTags = []string{
"genclient",
"genclient:nonNamespaced",
"genclient:noVerbs",
"genclient:onlyVerbs",
"genclient:skipVerbs",
"genclient:noStatus",
"genclient:readonly",
"genclient:method",
}
// SupportedVerbs is a list of supported verbs for +onlyVerbs and +skipVerbs.
var SupportedVerbs = []string{
"create",
"update",
"updateStatus",
"delete",
"deleteCollection",
"get",
"list",
"watch",
"patch",
}
// ReadonlyVerbs represents a list of read-only verbs.
var ReadonlyVerbs = []string{
"get",
"list",
"watch",
}
// genClientPrefix is the default prefix for all genclient tags.
const genClientPrefix = "genclient:"
// unsupportedExtensionVerbs is a list of verbs we don't support generating
// extension client functions for.
var unsupportedExtensionVerbs = []string{
"updateStatus",
"deleteCollection",
"watch",
"delete",
}
// inputTypeSupportedVerbs is a list of verb types that support overriding the
// input argument type.
var inputTypeSupportedVerbs = []string{
"create",
"update",
}
// resultTypeSupportedVerbs is a list of verb types that support overriding the
// resulting type.
var resultTypeSupportedVerbs = []string{
"create",
"update",
"get",
"list",
"patch",
}
// Extensions allows extending the default set of client verbs
// (CRUD+watch+patch+list+deleteCollection) for a given type with custom-defined
// verbs. Custom verbs can have custom input and result types and can also
// use a sub-resource in a request instead of the top-level resource type.
//
// Example:
//
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
//
// type ReplicaSet struct { ... }
//
// The 'method=UpdateScale' is the name of the client function.
// The 'verb=update' here means the client function will use 'PUT' action.
// The 'subresource=scale' means we will use SubResource template to generate this client function.
// The 'input' is the input type used for creation (function argument).
// The 'result' (not needed in this case) is the result type returned from the
// client function.
//
type extension struct {
// VerbName is the name of the custom verb (Scale, Instantiate, etc..)
VerbName string
// VerbType is the type of the verb (only verbs from SupportedVerbs are
// supported)
VerbType string
// SubResourcePath defines a path to a sub-resource to use in the request.
// (optional)
SubResourcePath string
// InputTypeOverride overrides the input parameter type for the verb. By
// default the original type is used. Overriding the input type only works for
// "create" and "update" verb types. The given type must exist in the same
// package as the original type.
// (optional)
InputTypeOverride string
// ResultTypeOverride overrides the resulting object type for the verb. By
// default the original type is used. Overriding the result type only works for
// the verb types listed in resultTypeSupportedVerbs.
// (optional)
ResultTypeOverride string
}
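// Illustration only; the tag below is a typical example, not something defined
// in this file. parseClientExtensions turns
//
//    // +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
//
// into
//
//    extension{
//        VerbName:           "GetScale",
//        VerbType:           "get",
//        SubResourcePath:    "scale",
//        ResultTypeOverride: "k8s.io/api/autoscaling/v1.Scale",
//    }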
// IsSubresource indicates if this extension should generate the sub-resource.
func (e *extension) IsSubresource() bool {
return len(e.SubResourcePath) > 0
}
// HasVerb checks if the extension matches the given verb.
func (e *extension) HasVerb(verb string) bool {
return e.VerbType == verb
}
// Input returns the input override package path and the type.
func (e *extension) Input() (string, string) {
parts := strings.Split(e.InputTypeOverride, ".")
return parts[len(parts)-1], strings.Join(parts[0:len(parts)-1], ".")
}
// Result returns the result override package path and the type.
func (e *extension) Result() (string, string) {
parts := strings.Split(e.ResultTypeOverride, ".")
return parts[len(parts)-1], strings.Join(parts[0:len(parts)-1], ".")
}
// Tags represents a genclient configuration for a single type.
type Tags struct {
// +genclient
GenerateClient bool
// +genclient:nonNamespaced
NonNamespaced bool
// +genclient:noStatus
NoStatus bool
// +genclient:noVerbs
NoVerbs bool
// +genclient:skipVerbs=get,update
// +genclient:onlyVerbs=create,delete
SkipVerbs []string
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
Extensions []extension
}
// HasVerb returns true if we should include the given verb in final client interface and
// generate the function for it.
func (t Tags) HasVerb(verb string) bool {
if len(t.SkipVerbs) == 0 {
return true
}
for _, s := range t.SkipVerbs {
if verb == s {
return false
}
}
return true
}
// MustParseClientGenTags calls ParseClientGenTags but instead of returning error it panics.
func MustParseClientGenTags(lines []string) Tags {
tags, err := ParseClientGenTags(lines)
if err != nil {
panic(err.Error())
}
return tags
}
// ParseClientGenTags parses the provided genclient tags and validates that no unknown
// tags are provided.
func ParseClientGenTags(lines []string) (Tags, error) {
ret := Tags{}
values := types.ExtractCommentTags("+", lines)
value := []string{}
value, ret.GenerateClient = values["genclient"]
// Check the old format and error when used to avoid generating client when //+genclient=false
if len(value) > 0 && len(value[0]) > 0 {
return ret, fmt.Errorf("+genclient=%s is invalid, use //+genclient if you want to generate client or omit it when you want to disable generation", value)
}
_, ret.NonNamespaced = values[genClientPrefix+"nonNamespaced"]
// Check the old format and error when used
if value := values["nonNamespaced"]; len(value) > 0 && len(value[0]) > 0 {
return ret, fmt.Errorf("+nonNamespaced=%s is invalid, use //+genclient:nonNamespaced instead", value[0])
}
_, ret.NoVerbs = values[genClientPrefix+"noVerbs"]
_, ret.NoStatus = values[genClientPrefix+"noStatus"]
onlyVerbs := []string{}
if _, isReadonly := values[genClientPrefix+"readonly"]; isReadonly {
onlyVerbs = ReadonlyVerbs
}
// Check the old format and error when used
if value := values["readonly"]; len(value) > 0 && len(value[0]) > 0 {
return ret, fmt.Errorf("+readonly=%s is invalid, use //+genclient:readonly instead", value[0])
}
if v, exists := values[genClientPrefix+"skipVerbs"]; exists {
ret.SkipVerbs = strings.Split(v[0], ",")
}
if v, exists := values[genClientPrefix+"onlyVerbs"]; exists || len(onlyVerbs) > 0 {
if len(v) > 0 {
onlyVerbs = append(onlyVerbs, strings.Split(v[0], ",")...)
}
skipVerbs := []string{}
for _, m := range SupportedVerbs {
skip := true
for _, o := range onlyVerbs {
if o == m {
skip = false
break
}
}
// Check for conflicts
for _, v := range skipVerbs {
if v == m {
return ret, fmt.Errorf("verb %q used both in genclient:skipVerbs and genclient:onlyVerbs", v)
}
}
if skip {
skipVerbs = append(skipVerbs, m)
}
}
ret.SkipVerbs = skipVerbs
}
var err error
if ret.Extensions, err = parseClientExtensions(values); err != nil {
return ret, err
}
return ret, validateClientGenTags(values)
}
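// Illustration only: for a type annotated with
//
//    // +genclient
//    // +genclient:nonNamespaced
//    // +genclient:onlyVerbs=get,list
//
// ParseClientGenTags returns
//
//    Tags{
//        GenerateClient: true,
//        NonNamespaced:  true,
//        SkipVerbs:      []string{"create", "update", "updateStatus", "delete", "deleteCollection", "watch", "patch"},
//    }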
func parseClientExtensions(tags map[string][]string) ([]extension, error) {
var ret []extension
for name, values := range tags {
if !strings.HasPrefix(name, genClientPrefix+"method") {
continue
}
for _, value := range values {
// the value comes in this form: "Foo,verb=create"
ext := extension{}
parts := strings.Split(value, ",")
if len(parts) == 0 {
return nil, fmt.Errorf("invalid or empty extension verb name: %q", value)
}
// The first part represents the name of the extension
ext.VerbName = parts[0]
if len(ext.VerbName) == 0 {
return nil, fmt.Errorf("must specify a verb name (// +genclient:method=Foo,verb=create)")
}
// Parse rest of the arguments
params := parts[1:]
for _, p := range params {
parts := strings.Split(p, "=")
if len(parts) != 2 {
return nil, fmt.Errorf("invalid extension tag specification %q", p)
}
key, val := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])
if len(val) == 0 {
return nil, fmt.Errorf("empty value of %q for %q extension", key, ext.VerbName)
}
switch key {
case "verb":
ext.VerbType = val
case "subresource":
ext.SubResourcePath = val
case "input":
ext.InputTypeOverride = val
case "result":
ext.ResultTypeOverride = val
default:
return nil, fmt.Errorf("unknown extension configuration key %q", key)
}
}
// Validate resulting extension configuration
if len(ext.VerbType) == 0 {
return nil, fmt.Errorf("verb type must be specified (use '// +genclient:method=%s,verb=create')", ext.VerbName)
}
if len(ext.ResultTypeOverride) > 0 {
supported := false
for _, v := range resultTypeSupportedVerbs {
if ext.VerbType == v {
supported = true
break
}
}
if !supported {
return nil, fmt.Errorf("%s: result type is not supported for %q verbs (supported verbs: %#v)", ext.VerbName, ext.VerbType, resultTypeSupportedVerbs)
}
}
if len(ext.InputTypeOverride) > 0 {
supported := false
for _, v := range inputTypeSupportedVerbs {
if ext.VerbType == v {
supported = true
break
}
}
if !supported {
return nil, fmt.Errorf("%s: input type is not supported for %q verbs (supported verbs: %#v)", ext.VerbName, ext.VerbType, inputTypeSupportedVerbs)
}
}
for _, t := range unsupportedExtensionVerbs {
if ext.VerbType == t {
return nil, fmt.Errorf("verb %q is not supported by extension generator", ext.VerbType)
}
}
ret = append(ret, ext)
}
}
return ret, nil
}
// validateClientGenTags validates that only supported genclient tags were provided.
func validateClientGenTags(values map[string][]string) error {
for _, k := range supportedTags {
delete(values, k)
}
for key := range values {
if strings.HasPrefix(key, strings.TrimSuffix(genClientPrefix, ":")) {
return errors.New("unknown tag detected: " + key)
}
}
return nil
}

66
vendor/k8s.io/code-generator/cmd/client-gen/main.go generated vendored Normal file

@ -0,0 +1,66 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// client-gen makes the individual typed clients using gengo.
package main
import (
"flag"
"path/filepath"
"github.com/spf13/pflag"
"k8s.io/gengo/args"
"k8s.io/klog"
generatorargs "k8s.io/code-generator/cmd/client-gen/args"
"k8s.io/code-generator/cmd/client-gen/generators"
"k8s.io/code-generator/pkg/util"
)
func main() {
klog.InitFlags(nil)
genericArgs, customArgs := generatorargs.NewDefaults()
// Override defaults.
// TODO: move this out of client-gen
genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath())
genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/clientset_generated/"
genericArgs.AddFlags(pflag.CommandLine)
customArgs.AddFlags(pflag.CommandLine, "k8s.io/kubernetes/pkg/apis") // TODO: move this input path out of client-gen
flag.Set("logtostderr", "true")
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
// add group version package as input dirs for gengo
for _, pkg := range customArgs.Groups {
for _, v := range pkg.Versions {
genericArgs.InputDirs = append(genericArgs.InputDirs, v.Package)
}
}
if err := generatorargs.Validate(genericArgs); err != nil {
klog.Fatalf("Error: %v", err)
}
if err := genericArgs.Execute(
generators.NameSystems(),
generators.DefaultNameSystem(),
generators.Packages,
); err != nil {
klog.Fatalf("Error: %v", err)
}
}
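// A typical invocation, shown for illustration only; all paths and names below
// are placeholders, not values taken from this repository:
//
//    client-gen \
//        --input-base k8s.io/sample-controller/pkg/apis \
//        --input samplecontroller/v1alpha1 \
//        --clientset-name versioned \
//        --output-package k8s.io/sample-controller/pkg/generated/clientset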


@ -0,0 +1,31 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package path
import "strings"
// Vendorless removes the longest match of "*/vendor/" from the front of p.
// It is useful when a package is located under vendor/, e.g.,
// k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1, because gengo
// indexes the package by its import path, e.g.,
// k8s.io/apimachinery/pkg/apis/meta/v1.
func Vendorless(p string) string {
if pos := strings.LastIndex(p, "/vendor/"); pos != -1 {
return p[pos+len("/vendor/"):]
}
return p
}
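// For example:
//
//    Vendorless("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1")
//    // -> "k8s.io/apimachinery/pkg/apis/meta/v1"
//
//    Vendorless("k8s.io/apimachinery/pkg/apis/meta/v1")
//    // -> unchanged; there is no "/vendor/" segment to trim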


@ -0,0 +1,121 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
"fmt"
"regexp"
"sort"
"strings"
"k8s.io/gengo/namer"
)
// ToGroupVersion turns a "group/version" string into a GroupVersion struct. It returns an error
// if it cannot parse the string.
func ToGroupVersion(gv string) (GroupVersion, error) {
// this can be the internal version for the legacy kube types
// TODO once we've cleared the last uses as strings, this special case should be removed.
if (len(gv) == 0) || (gv == "/") {
return GroupVersion{}, nil
}
switch strings.Count(gv, "/") {
case 0:
return GroupVersion{Group(gv), ""}, nil
case 1:
i := strings.Index(gv, "/")
return GroupVersion{Group(gv[:i]), Version(gv[i+1:])}, nil
default:
return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv)
}
}
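// Illustration only:
//
//    ToGroupVersion("apps/v1") // -> GroupVersion{Group: "apps", Version: "v1"}, nil
//    ToGroupVersion("apps")    // -> GroupVersion{Group: "apps"}, nil
//    ToGroupVersion("a/b/c")   // -> GroupVersion{}, error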
type sortableSliceOfVersions []string
func (a sortableSliceOfVersions) Len() int { return len(a) }
func (a sortableSliceOfVersions) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a sortableSliceOfVersions) Less(i, j int) bool {
vi, vj := strings.TrimLeft(a[i], "v"), strings.TrimLeft(a[j], "v")
major := regexp.MustCompile("^[0-9]+")
viMajor, vjMajor := major.FindString(vi), major.FindString(vj)
viRemaining, vjRemaining := strings.TrimLeft(vi, viMajor), strings.TrimLeft(vj, vjMajor)
switch {
case len(viRemaining) == 0 && len(vjRemaining) == 0:
return viMajor < vjMajor
case len(viRemaining) == 0 && len(vjRemaining) != 0:
// stable version is greater than unstable version
return false
case len(viRemaining) != 0 && len(vjRemaining) == 0:
// stable version is greater than unstable version
return true
}
// neither are stable versions
if viMajor != vjMajor {
return viMajor < vjMajor
}
// assuming at most we have one alpha or one beta version, so if vi contains "alpha", it's the lesser one.
return strings.Contains(viRemaining, "alpha")
}
// defaultVersion determines the default version among versions. If a user calls a group client
// without specifying the version (e.g., c.Core() instead of c.CoreV1()), the
// default version will be returned.
func defaultVersion(versions []PackageVersion) Version {
var versionStrings []string
for _, version := range versions {
versionStrings = append(versionStrings, version.Version.String())
}
sort.Sort(sortableSliceOfVersions(versionStrings))
return Version(versionStrings[len(versionStrings)-1])
}
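// Illustration only: with the comparator above, the versions
// {"v2alpha1", "v1", "v1beta1", "v1alpha1"} sort to
// {"v1alpha1", "v1beta1", "v2alpha1", "v1"} (unstable versions sort before
// stable ones), so defaultVersion picks the last element, "v1".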
// ToGroupVersionInfo is a helper function used by generators for groups.
func ToGroupVersionInfo(groups []GroupVersions, groupGoNames map[GroupVersion]string) []GroupVersionInfo {
var groupVersionPackages []GroupVersionInfo
for _, group := range groups {
for _, version := range group.Versions {
groupGoName := groupGoNames[GroupVersion{Group: group.Group, Version: version.Version}]
groupVersionPackages = append(groupVersionPackages, GroupVersionInfo{
Group: Group(namer.IC(group.Group.NonEmpty())),
Version: Version(namer.IC(version.Version.String())),
PackageAlias: strings.ToLower(groupGoName + version.Version.NonEmpty()),
GroupGoName: groupGoName,
LowerCaseGroupGoName: namer.IL(groupGoName),
})
}
}
return groupVersionPackages
}
func ToGroupInstallPackages(groups []GroupVersions, groupGoNames map[GroupVersion]string) []GroupInstallPackage {
var groupInstallPackages []GroupInstallPackage
for _, group := range groups {
defaultVersion := defaultVersion(group.Versions)
groupGoName := groupGoNames[GroupVersion{Group: group.Group, Version: defaultVersion}]
groupInstallPackages = append(groupInstallPackages, GroupInstallPackage{
Group: Group(namer.IC(group.Group.NonEmpty())),
InstallPackageAlias: strings.ToLower(groupGoName),
})
}
return groupInstallPackages
}
// NormalizeGroupVersion normalizes the GroupVersion.
//func NormalizeGroupVersion(gv GroupVersion) GroupVersion {
// return GroupVersion{Group: gv.Group.NonEmpty(), Version: gv.Version, NonEmptyVersion: normalization.Version(gv.Version)}
//}


@ -0,0 +1,75 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
type Version string
func (v Version) String() string {
return string(v)
}
func (v Version) NonEmpty() string {
if v == "" {
return "internalVersion"
}
return v.String()
}
type Group string
func (g Group) String() string {
return string(g)
}
func (g Group) NonEmpty() string {
if g == "api" {
return "core"
}
return string(g)
}
type PackageVersion struct {
Version
// The fully qualified package, e.g. k8s.io/kubernetes/pkg/apis/apps, where the types.go is found.
Package string
}
type GroupVersion struct {
Group Group
Version Version
}
type GroupVersions struct {
// The name of the package for this group, e.g. apps.
PackageName string
Group Group
Versions []PackageVersion
}
// GroupVersionInfo contains all the info around a group version.
type GroupVersionInfo struct {
Group Group
Version Version
PackageAlias string
GroupGoName string
LowerCaseGroupGoName string
}
type GroupInstallPackage struct {
Group Group
InstallPackageAlias string
}
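// Illustration only (group name and package paths are placeholders): an "apps"
// group tracked at two versions would be described as
//
//    GroupVersions{
//        PackageName: "apps",
//        Group:       "apps",
//        Versions: []PackageVersion{
//            {Version: "v1", Package: "k8s.io/api/apps/v1"},
//            {Version: "v1beta1", Package: "k8s.io/api/apps/v1beta1"},
//        },
//    }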


@ -0,0 +1,54 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package args
import (
"fmt"
"github.com/spf13/pflag"
"k8s.io/gengo/args"
"k8s.io/gengo/examples/deepcopy-gen/generators"
)
// CustomArgs is used by the gengo framework to pass args specific to this generator.
type CustomArgs generators.CustomArgs
// NewDefaults returns default arguments for the generator.
func NewDefaults() (*args.GeneratorArgs, *CustomArgs) {
genericArgs := args.Default().WithoutDefaultFlagParsing()
customArgs := &CustomArgs{}
genericArgs.CustomArgs = (*generators.CustomArgs)(customArgs) // convert to upstream type to make type-casts work there
genericArgs.OutputFileBaseName = "deepcopy_generated"
return genericArgs, customArgs
}
// AddFlags adds the generator flags to the flag set.
func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) {
pflag.CommandLine.StringSliceVar(&ca.BoundingDirs, "bounding-dirs", ca.BoundingDirs,
"Comma-separated list of import paths which bound the types for which deep-copies will be generated.")
}
// Validate checks the given arguments.
func Validate(genericArgs *args.GeneratorArgs) error {
_ = genericArgs.CustomArgs.(*generators.CustomArgs)
if len(genericArgs.OutputFileBaseName) == 0 {
return fmt.Errorf("output file base name cannot be empty")
}
return nil
}

85
vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go generated vendored Normal file

@ -0,0 +1,85 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// deepcopy-gen is a tool for auto-generating DeepCopy functions.
//
// Given a list of input directories, it will generate functions that
// efficiently perform a full deep-copy of each type. For any type that
// offers a `.DeepCopy()` method, it will simply call that. Otherwise it will
// use standard value assignment whenever possible. If that is not possible it
// will try to call its own generated copy function for the type, if the type is
// within the allowed root packages. Failing that, it will fall back on
// `conversion.Cloner.DeepCopy(val)` to make the copy. The resulting file will
// be stored in the same directory as the processed source package.
//
// Generation is governed by comment tags in the source. Any package may
// request DeepCopy generation by including a comment in the file-comments of
// one file, of the form:
// // +k8s:deepcopy-gen=package
//
// DeepCopy functions can be generated for individual types, rather than the
// entire package by specifying a comment on the type definition of the form:
// // +k8s:deepcopy-gen=true
//
// When generating for a whole package, individual types may opt out of
// DeepCopy generation by specifying a comment of the form:
// // +k8s:deepcopy-gen=false
//
// Note that registration is a whole-package option, and is not available for
// individual types.
package main
import (
"flag"
"path/filepath"
"github.com/spf13/pflag"
"k8s.io/gengo/args"
"k8s.io/gengo/examples/deepcopy-gen/generators"
"k8s.io/klog"
generatorargs "k8s.io/code-generator/cmd/deepcopy-gen/args"
"k8s.io/code-generator/pkg/util"
)
func main() {
klog.InitFlags(nil)
genericArgs, customArgs := generatorargs.NewDefaults()
// Override defaults.
// TODO: move this out of deepcopy-gen
genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath())
genericArgs.AddFlags(pflag.CommandLine)
customArgs.AddFlags(pflag.CommandLine)
flag.Set("logtostderr", "true")
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
if err := generatorargs.Validate(genericArgs); err != nil {
klog.Fatalf("Error: %v", err)
}
// Run it.
if err := genericArgs.Execute(
generators.NameSystems(),
generators.DefaultNameSystem(),
generators.Packages,
); err != nil {
klog.Fatalf("Error: %v", err)
}
klog.V(2).Info("Completed successfully.")
}
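// A typical invocation, shown for illustration only; the paths and the output
// file base are placeholders, not values taken from this repository:
//
//    deepcopy-gen \
//        --input-dirs k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1 \
//        --bounding-dirs k8s.io/sample-controller/pkg/apis \
//        --output-file-base zz_generated.deepcopy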


@ -0,0 +1,77 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package args
import (
"fmt"
"path"
"github.com/spf13/pflag"
codegenutil "k8s.io/code-generator/pkg/util"
"k8s.io/gengo/args"
)
// CustomArgs is used by the gengo framework to pass args specific to this generator.
type CustomArgs struct {
VersionedClientSetPackage string
InternalClientSetPackage string
ListersPackage string
SingleDirectory bool
}
// NewDefaults returns default arguments for the generator.
func NewDefaults() (*args.GeneratorArgs, *CustomArgs) {
genericArgs := args.Default().WithoutDefaultFlagParsing()
customArgs := &CustomArgs{
SingleDirectory: false,
}
genericArgs.CustomArgs = customArgs
if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 {
genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/informers")
customArgs.VersionedClientSetPackage = path.Join(pkg, "pkg/client/clientset/versioned")
customArgs.InternalClientSetPackage = path.Join(pkg, "pkg/client/clientset/internalversion")
customArgs.ListersPackage = path.Join(pkg, "pkg/client/listers")
}
return genericArgs, customArgs
}
// AddFlags adds the generator flags to the flag set.
func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&ca.InternalClientSetPackage, "internal-clientset-package", ca.InternalClientSetPackage, "the full package name for the internal clientset to use")
fs.StringVar(&ca.VersionedClientSetPackage, "versioned-clientset-package", ca.VersionedClientSetPackage, "the full package name for the versioned clientset to use")
fs.StringVar(&ca.ListersPackage, "listers-package", ca.ListersPackage, "the full package name for the listers to use")
fs.BoolVar(&ca.SingleDirectory, "single-directory", ca.SingleDirectory, "if true, omit the intermediate \"internalversion\" and \"externalversions\" subdirectories")
}
// Validate checks the given arguments.
func Validate(genericArgs *args.GeneratorArgs) error {
customArgs := genericArgs.CustomArgs.(*CustomArgs)
if len(genericArgs.OutputPackagePath) == 0 {
return fmt.Errorf("output package cannot be empty")
}
if len(customArgs.VersionedClientSetPackage) == 0 {
return fmt.Errorf("versioned clientset package cannot be empty")
}
if len(customArgs.ListersPackage) == 0 {
return fmt.Errorf("listers package cannot be empty")
}
return nil
}


@ -0,0 +1,258 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"io"
"path"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/klog"
)
// factoryGenerator produces the shared informer factory file for the
// configured group versions.
type factoryGenerator struct {
generator.DefaultGen
outputPackage string
imports namer.ImportTracker
groupVersions map[string]clientgentypes.GroupVersions
gvGoNames map[string]string
clientSetPackage string
internalInterfacesPackage string
filtered bool
}
var _ generator.Generator = &factoryGenerator{}
func (g *factoryGenerator) Filter(c *generator.Context, t *types.Type) bool {
if !g.filtered {
g.filtered = true
return true
}
return false
}
func (g *factoryGenerator) Namers(c *generator.Context) namer.NameSystems {
return namer.NameSystems{
"raw": namer.NewRawNamer(g.outputPackage, g.imports),
}
}
func (g *factoryGenerator) Imports(c *generator.Context) (imports []string) {
imports = append(imports, g.imports.ImportLines()...)
return
}
func (g *factoryGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "{{", "}}")
klog.V(5).Infof("processing type %v", t)
gvInterfaces := make(map[string]*types.Type)
gvNewFuncs := make(map[string]*types.Type)
for groupPkgName := range g.groupVersions {
gvInterfaces[groupPkgName] = c.Universe.Type(types.Name{Package: path.Join(g.outputPackage, groupPkgName), Name: "Interface"})
gvNewFuncs[groupPkgName] = c.Universe.Function(types.Name{Package: path.Join(g.outputPackage, groupPkgName), Name: "New"})
}
m := map[string]interface{}{
"cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer),
"groupVersions": g.groupVersions,
"gvInterfaces": gvInterfaces,
"gvNewFuncs": gvNewFuncs,
"gvGoNames": g.gvGoNames,
"interfacesNewInformerFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "NewInformerFunc"}),
"interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}),
"informerFactoryInterface": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "SharedInformerFactory"}),
"clientSetInterface": c.Universe.Type(types.Name{Package: g.clientSetPackage, Name: "Interface"}),
"reflectType": c.Universe.Type(reflectType),
"runtimeObject": c.Universe.Type(runtimeObject),
"schemaGroupVersionResource": c.Universe.Type(schemaGroupVersionResource),
"syncMutex": c.Universe.Type(syncMutex),
"timeDuration": c.Universe.Type(timeDuration),
"namespaceAll": c.Universe.Type(metav1NamespaceAll),
"object": c.Universe.Type(metav1Object),
}
sw.Do(sharedInformerFactoryStruct, m)
sw.Do(sharedInformerFactoryInterface, m)
return sw.Error()
}
var sharedInformerFactoryStruct = `
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
type sharedInformerFactory struct {
client {{.clientSetInterface|raw}}
namespace string
tweakListOptions {{.interfacesTweakListOptionsFunc|raw}}
lock {{.syncMutex|raw}}
defaultResync {{.timeDuration|raw}}
customResync map[{{.reflectType|raw}}]{{.timeDuration|raw}}
informers map[{{.reflectType|raw}}]{{.cacheSharedIndexInformer|raw}}
// startedInformers is used for tracking which informers have been started.
// This allows Start() to be called multiple times safely.
startedInformers map[{{.reflectType|raw}}]bool
}
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[{{.object|raw}}]{{.timeDuration|raw}}) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
for k, v := range resyncConfig {
factory.customResync[reflect.TypeOf(k)] = v
}
return factory
}
}
// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.tweakListOptions = tweakListOptions
return factory
}
}
// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.namespace = namespace
return factory
}
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client {{.clientSetInterface|raw}}, defaultResync {{.timeDuration|raw}}) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync)
}
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client {{.clientSetInterface|raw}}, defaultResync {{.timeDuration|raw}}, namespace string, tweakListOptions {{.interfacesTweakListOptionsFunc|raw}}) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}
// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client {{.clientSetInterface|raw}}, defaultResync {{.timeDuration|raw}}, options ...SharedInformerOption) SharedInformerFactory {
factory := &sharedInformerFactory{
client: client,
namespace: v1.NamespaceAll,
defaultResync: defaultResync,
informers: make(map[{{.reflectType|raw}}]{{.cacheSharedIndexInformer|raw}}),
startedInformers: make(map[{{.reflectType|raw}}]bool),
customResync: make(map[{{.reflectType|raw}}]{{.timeDuration|raw}}),
}
// Apply all options
for _, opt := range options {
factory = opt(factory)
}
return factory
}
// Start initializes all requested informers.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
for informerType, informer := range f.informers {
if !f.startedInformers[informerType] {
go informer.Run(stopCh)
f.startedInformers[informerType] = true
}
}
}
// WaitForCacheSync waits for the caches of all started informers to be synced.
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
informers := func()map[reflect.Type]cache.SharedIndexInformer{
f.lock.Lock()
defer f.lock.Unlock()
informers := map[reflect.Type]cache.SharedIndexInformer{}
for informerType, informer := range f.informers {
if f.startedInformers[informerType] {
informers[informerType] = informer
}
}
return informers
}()
res := map[reflect.Type]bool{}
for informType, informer := range informers {
res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
}
return res
}
// InformerFor returns the SharedIndexInformer for obj, constructing it with
// newFunc and caching it if it does not exist yet.
func (f *sharedInformerFactory) InformerFor(obj {{.runtimeObject|raw}}, newFunc {{.interfacesNewInformerFunc|raw}}) {{.cacheSharedIndexInformer|raw}} {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(obj)
informer, exists := f.informers[informerType]
if exists {
return informer
}
resyncPeriod, exists := f.customResync[informerType]
if !exists {
resyncPeriod = f.defaultResync
}
informer = newFunc(f.client, resyncPeriod)
f.informers[informerType] = informer
return informer
}
`
var sharedInformerFactoryInterface = `
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
type SharedInformerFactory interface {
{{.informerFactoryInterface|raw}}
ForResource(resource {{.schemaGroupVersionResource|raw}}) (GenericInformer, error)
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
{{$gvInterfaces := .gvInterfaces}}
{{$gvGoNames := .gvGoNames}}
{{range $groupName, $group := .groupVersions}}{{index $gvGoNames $groupName}}() {{index $gvInterfaces $groupName|raw}}
{{end}}
}
{{$gvNewFuncs := .gvNewFuncs}}
{{$gvGoNames := .gvGoNames}}
{{range $groupPkgName, $group := .groupVersions}}
func (f *sharedInformerFactory) {{index $gvGoNames $groupPkgName}}() {{index $gvInterfaces $groupPkgName|raw}} {
return {{index $gvNewFuncs $groupPkgName|raw}}(f, f.namespace, f.tweakListOptions)
}
{{end}}
`
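// Illustrative use of the generated factory; the client value, resync period,
// namespace, and GroupVersionResource below are assumptions, not part of this
// generator's output:
//
//    factory := NewSharedInformerFactoryWithOptions(client, 30*time.Second, WithNamespace("kube-system"))
//    gi, err := factory.ForResource(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"})
//    // handle err, then:
//    factory.Start(stopCh)
//    factory.WaitForCacheSync(stopCh)
//    lister := gi.Lister()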


@ -0,0 +1,90 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"io"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/klog"
)
// factoryInterfaceGenerator produces a file of interfaces used to break a dependency cycle for
// informer registration
type factoryInterfaceGenerator struct {
generator.DefaultGen
outputPackage string
imports namer.ImportTracker
clientSetPackage string
filtered bool
}
var _ generator.Generator = &factoryInterfaceGenerator{}
func (g *factoryInterfaceGenerator) Filter(c *generator.Context, t *types.Type) bool {
if !g.filtered {
g.filtered = true
return true
}
return false
}
func (g *factoryInterfaceGenerator) Namers(c *generator.Context) namer.NameSystems {
return namer.NameSystems{
"raw": namer.NewRawNamer(g.outputPackage, g.imports),
}
}
func (g *factoryInterfaceGenerator) Imports(c *generator.Context) (imports []string) {
imports = append(imports, g.imports.ImportLines()...)
return
}
func (g *factoryInterfaceGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "{{", "}}")
klog.V(5).Infof("processing type %v", t)
m := map[string]interface{}{
"cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer),
"clientSetPackage": c.Universe.Type(types.Name{Package: g.clientSetPackage, Name: "Interface"}),
"runtimeObject": c.Universe.Type(runtimeObject),
"timeDuration": c.Universe.Type(timeDuration),
"v1ListOptions": c.Universe.Type(v1ListOptions),
}
sw.Do(externalSharedInformerFactoryInterface, m)
return sw.Error()
}
var externalSharedInformerFactoryInterface = `
// NewInformerFunc takes {{.clientSetPackage|raw}} and {{.timeDuration|raw}} to return a SharedIndexInformer.
type NewInformerFunc func({{.clientSetPackage|raw}}, {{.timeDuration|raw}}) cache.SharedIndexInformer
// SharedInformerFactory is a small interface that allows adding an informer without an import cycle
type SharedInformerFactory interface {
Start(stopCh <-chan struct{})
InformerFor(obj {{.runtimeObject|raw}}, newFunc NewInformerFunc) {{.cacheSharedIndexInformer|raw}}
}
// TweakListOptionsFunc is a function that transforms a {{.v1ListOptions|raw}}.
type TweakListOptionsFunc func(*{{.v1ListOptions|raw}})
`
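// Illustration only (the label selector is a placeholder): a TweakListOptionsFunc
// can narrow everything a filtered factory lists and watches, for example
//
//    tweak := func(options *metav1.ListOptions) {
//        options.LabelSelector = "app=example"
//    }
//    factory := NewFilteredSharedInformerFactory(client, 30*time.Second, metav1.NamespaceAll, tweak)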


@ -0,0 +1,184 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"io"
"sort"
"strings"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
codegennamer "k8s.io/code-generator/pkg/namer"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
)
// genericGenerator generates the generic informer.
type genericGenerator struct {
generator.DefaultGen
outputPackage string
imports namer.ImportTracker
groupVersions map[string]clientgentypes.GroupVersions
groupGoNames map[string]string
typesForGroupVersion map[clientgentypes.GroupVersion][]*types.Type
filtered bool
}
var _ generator.Generator = &genericGenerator{}
func (g *genericGenerator) Filter(c *generator.Context, t *types.Type) bool {
if !g.filtered {
g.filtered = true
return true
}
return false
}
func (g *genericGenerator) Namers(c *generator.Context) namer.NameSystems {
pluralExceptions := map[string]string{
"Endpoints": "Endpoints",
}
return namer.NameSystems{
"raw": namer.NewRawNamer(g.outputPackage, g.imports),
"allLowercasePlural": namer.NewAllLowercasePluralNamer(pluralExceptions),
"publicPlural": namer.NewPublicPluralNamer(pluralExceptions),
"resource": codegennamer.NewTagOverrideNamer("resourceName", namer.NewAllLowercasePluralNamer(pluralExceptions)),
}
}
func (g *genericGenerator) Imports(c *generator.Context) (imports []string) {
imports = append(imports, g.imports.ImportLines()...)
imports = append(imports, "fmt")
return
}
type group struct {
GroupGoName string
Name string
Versions []*version
}
type groupSort []group
func (g groupSort) Len() int { return len(g) }
func (g groupSort) Less(i, j int) bool { return strings.ToLower(g[i].Name) < strings.ToLower(g[j].Name) }
func (g groupSort) Swap(i, j int) { g[i], g[j] = g[j], g[i] }
type version struct {
Name string
GoName string
Resources []*types.Type
}
type versionSort []*version
func (v versionSort) Len() int { return len(v) }
func (v versionSort) Less(i, j int) bool {
return strings.ToLower(v[i].Name) < strings.ToLower(v[j].Name)
}
func (v versionSort) Swap(i, j int) { v[i], v[j] = v[j], v[i] }
func (g *genericGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "{{", "}}")
groups := []group{}
schemeGVs := make(map[*version]*types.Type)
orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)}
for groupPackageName, groupVersions := range g.groupVersions {
group := group{
GroupGoName: g.groupGoNames[groupPackageName],
Name: groupVersions.Group.NonEmpty(),
Versions: []*version{},
}
for _, v := range groupVersions.Versions {
gv := clientgentypes.GroupVersion{Group: groupVersions.Group, Version: v.Version}
version := &version{
Name: v.Version.NonEmpty(),
GoName: namer.IC(v.Version.NonEmpty()),
Resources: orderer.OrderTypes(g.typesForGroupVersion[gv]),
}
func() {
schemeGVs[version] = c.Universe.Variable(types.Name{Package: g.typesForGroupVersion[gv][0].Name.Package, Name: "SchemeGroupVersion"})
}()
group.Versions = append(group.Versions, version)
}
sort.Sort(versionSort(group.Versions))
groups = append(groups, group)
}
sort.Sort(groupSort(groups))
m := map[string]interface{}{
"cacheGenericLister": c.Universe.Type(cacheGenericLister),
"cacheNewGenericLister": c.Universe.Function(cacheNewGenericLister),
"cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer),
"groups": groups,
"schemeGVs": schemeGVs,
"schemaGroupResource": c.Universe.Type(schemaGroupResource),
"schemaGroupVersionResource": c.Universe.Type(schemaGroupVersionResource),
}
sw.Do(genericInformer, m)
sw.Do(forResource, m)
return sw.Error()
}
var genericInformer = `
// GenericInformer is a type of SharedIndexInformer that will locate and delegate to other
// shared informers based on type
type GenericInformer interface {
Informer() {{.cacheSharedIndexInformer|raw}}
Lister() {{.cacheGenericLister|raw}}
}
type genericInformer struct {
informer {{.cacheSharedIndexInformer|raw}}
resource {{.schemaGroupResource|raw}}
}
// Informer returns the SharedIndexInformer.
func (f *genericInformer) Informer() {{.cacheSharedIndexInformer|raw}} {
return f.informer
}
// Lister returns the GenericLister.
func (f *genericInformer) Lister() {{.cacheGenericLister|raw}} {
return {{.cacheNewGenericLister|raw}}(f.Informer().GetIndexer(), f.resource)
}
`
var forResource = `
// ForResource gives generic access to a shared informer of the matching type
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource {{.schemaGroupVersionResource|raw}}) (GenericInformer, error) {
switch resource {
{{range $group := .groups -}}{{$GroupGoName := .GroupGoName -}}
{{range $version := .Versions -}}
// Group={{$group.Name}}, Version={{.Name}}
{{range .Resources -}}
case {{index $.schemeGVs $version|raw}}.WithResource("{{.|resource}}"):
return &genericInformer{resource: resource.GroupResource(), informer: f.{{$GroupGoName}}().{{$version.GoName}}().{{.|publicPlural}}().Informer()}, nil
{{end}}
{{end}}
{{end -}}
}
return nil, fmt.Errorf("no informer found for %v", resource)
}
`


@ -0,0 +1,118 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"io"
"path/filepath"
"strings"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
)
// groupInterfaceGenerator generates the per-group interface file.
type groupInterfaceGenerator struct {
generator.DefaultGen
outputPackage string
imports namer.ImportTracker
groupVersions clientgentypes.GroupVersions
filtered bool
internalInterfacesPackage string
}
var _ generator.Generator = &groupInterfaceGenerator{}
func (g *groupInterfaceGenerator) Filter(c *generator.Context, t *types.Type) bool {
if !g.filtered {
g.filtered = true
return true
}
return false
}
func (g *groupInterfaceGenerator) Namers(c *generator.Context) namer.NameSystems {
return namer.NameSystems{
"raw": namer.NewRawNamer(g.outputPackage, g.imports),
}
}
func (g *groupInterfaceGenerator) Imports(c *generator.Context) (imports []string) {
imports = append(imports, g.imports.ImportLines()...)
return
}
type versionData struct {
Name string
Interface *types.Type
New *types.Type
}
func (g *groupInterfaceGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "$", "$")
versions := make([]versionData, 0, len(g.groupVersions.Versions))
for _, version := range g.groupVersions.Versions {
gv := clientgentypes.GroupVersion{Group: g.groupVersions.Group, Version: version.Version}
versionPackage := filepath.Join(g.outputPackage, strings.ToLower(gv.Version.NonEmpty()))
iface := c.Universe.Type(types.Name{Package: versionPackage, Name: "Interface"})
versions = append(versions, versionData{
Name: namer.IC(version.Version.NonEmpty()),
Interface: iface,
New: c.Universe.Function(types.Name{Package: versionPackage, Name: "New"}),
})
}
m := map[string]interface{}{
"interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}),
"interfacesSharedInformerFactory": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "SharedInformerFactory"}),
"versions": versions,
}
sw.Do(groupTemplate, m)
return sw.Error()
}
var groupTemplate = `
// Interface provides access to each of this group's versions.
type Interface interface {
$range .versions -$
// $.Name$ provides access to shared informers for resources in $.Name$.
$.Name$() $.Interface|raw$
$end$
}
type group struct {
factory $.interfacesSharedInformerFactory|raw$
namespace string
tweakListOptions $.interfacesTweakListOptionsFunc|raw$
}
// New returns a new Interface.
func New(f $.interfacesSharedInformerFactory|raw$, namespace string, tweakListOptions $.interfacesTweakListOptionsFunc|raw$) Interface {
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
$range .versions$
// $.Name$ returns a new $.Interface|raw$.
func (g *group) $.Name$() $.Interface|raw$ {
return $.New|raw$(g.factory, g.namespace, g.tweakListOptions)
}
$end$
`
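As a concrete illustration, a hypothetical rendering of groupTemplate for a group containing a single version v1 might look as follows; package aliases and identifiers are illustrative only.
// Hypothetical expansion of groupTemplate for one version.
type Interface interface {
	// V1 provides access to shared informers for resources in V1.
	V1() v1.Interface
}
type group struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// V1 returns a new v1.Interface.
func (g *group) V1() v1.Interface {
	return v1.New(g.factory, g.namespace, g.tweakListOptions)
}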

View File

@ -0,0 +1,186 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"fmt"
"io"
"strings"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/code-generator/cmd/client-gen/generators/util"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
"k8s.io/klog"
)
// informerGenerator produces a file of informers for a given GroupVersion and
// type.
type informerGenerator struct {
generator.DefaultGen
outputPackage string
groupPkgName string
groupVersion clientgentypes.GroupVersion
groupGoName string
typeToGenerate *types.Type
imports namer.ImportTracker
clientSetPackage string
listersPackage string
internalInterfacesPackage string
}
var _ generator.Generator = &informerGenerator{}
func (g *informerGenerator) Filter(c *generator.Context, t *types.Type) bool {
return t == g.typeToGenerate
}
func (g *informerGenerator) Namers(c *generator.Context) namer.NameSystems {
return namer.NameSystems{
"raw": namer.NewRawNamer(g.outputPackage, g.imports),
}
}
func (g *informerGenerator) Imports(c *generator.Context) (imports []string) {
imports = append(imports, g.imports.ImportLines()...)
return
}
func (g *informerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "$", "$")
klog.V(5).Infof("processing type %v", t)
listerPackage := fmt.Sprintf("%s/%s/%s", g.listersPackage, g.groupPkgName, strings.ToLower(g.groupVersion.Version.NonEmpty()))
clientSetInterface := c.Universe.Type(types.Name{Package: g.clientSetPackage, Name: "Interface"})
informerFor := "InformerFor"
tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...))
if err != nil {
return err
}
m := map[string]interface{}{
"apiScheme": c.Universe.Type(apiScheme),
"cacheIndexers": c.Universe.Type(cacheIndexers),
"cacheListWatch": c.Universe.Type(cacheListWatch),
"cacheMetaNamespaceIndexFunc": c.Universe.Function(cacheMetaNamespaceIndexFunc),
"cacheNamespaceIndex": c.Universe.Variable(cacheNamespaceIndex),
"cacheNewSharedIndexInformer": c.Universe.Function(cacheNewSharedIndexInformer),
"cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer),
"clientSetInterface": clientSetInterface,
"group": namer.IC(g.groupGoName),
"informerFor": informerFor,
"interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}),
"interfacesSharedInformerFactory": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "SharedInformerFactory"}),
"listOptions": c.Universe.Type(listOptions),
"lister": c.Universe.Type(types.Name{Package: listerPackage, Name: t.Name.Name + "Lister"}),
"namespaceAll": c.Universe.Type(metav1NamespaceAll),
"namespaced": !tags.NonNamespaced,
"newLister": c.Universe.Function(types.Name{Package: listerPackage, Name: "New" + t.Name.Name + "Lister"}),
"runtimeObject": c.Universe.Type(runtimeObject),
"timeDuration": c.Universe.Type(timeDuration),
"type": t,
"v1ListOptions": c.Universe.Type(v1ListOptions),
"version": namer.IC(g.groupVersion.Version.String()),
"watchInterface": c.Universe.Type(watchInterface),
}
sw.Do(typeInformerInterface, m)
sw.Do(typeInformerStruct, m)
sw.Do(typeInformerPublicConstructor, m)
sw.Do(typeFilteredInformerPublicConstructor, m)
sw.Do(typeInformerConstructor, m)
sw.Do(typeInformerInformer, m)
sw.Do(typeInformerLister, m)
return sw.Error()
}
var typeInformerInterface = `
// $.type|public$Informer provides access to a shared informer and lister for
// $.type|publicPlural$.
type $.type|public$Informer interface {
Informer() $.cacheSharedIndexInformer|raw$
Lister() $.lister|raw$
}
`
var typeInformerStruct = `
type $.type|private$Informer struct {
factory $.interfacesSharedInformerFactory|raw$
tweakListOptions $.interfacesTweakListOptionsFunc|raw$
$if .namespaced$namespace string$end$
}
`
var typeInformerPublicConstructor = `
// New$.type|public$Informer constructs a new informer for $.type|public$ type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func New$.type|public$Informer(client $.clientSetInterface|raw$$if .namespaced$, namespace string$end$, resyncPeriod $.timeDuration|raw$, indexers $.cacheIndexers|raw$) $.cacheSharedIndexInformer|raw$ {
return NewFiltered$.type|public$Informer(client$if .namespaced$, namespace$end$, resyncPeriod, indexers, nil)
}
`
var typeFilteredInformerPublicConstructor = `
// NewFiltered$.type|public$Informer constructs a new informer for $.type|public$ type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFiltered$.type|public$Informer(client $.clientSetInterface|raw$$if .namespaced$, namespace string$end$, resyncPeriod $.timeDuration|raw$, indexers $.cacheIndexers|raw$, tweakListOptions $.interfacesTweakListOptionsFunc|raw$) $.cacheSharedIndexInformer|raw$ {
return $.cacheNewSharedIndexInformer|raw$(
&$.cacheListWatch|raw${
ListFunc: func(options $.v1ListOptions|raw$) ($.runtimeObject|raw$, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.$.group$$.version$().$.type|publicPlural$($if .namespaced$namespace$end$).List(options)
},
WatchFunc: func(options $.v1ListOptions|raw$) ($.watchInterface|raw$, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.$.group$$.version$().$.type|publicPlural$($if .namespaced$namespace$end$).Watch(options)
},
},
&$.type|raw${},
resyncPeriod,
indexers,
)
}
`
var typeInformerConstructor = `
func (f *$.type|private$Informer) defaultInformer(client $.clientSetInterface|raw$, resyncPeriod $.timeDuration|raw$) $.cacheSharedIndexInformer|raw$ {
return NewFiltered$.type|public$Informer(client$if .namespaced$, f.namespace$end$, resyncPeriod, $.cacheIndexers|raw${$.cacheNamespaceIndex|raw$: $.cacheMetaNamespaceIndexFunc|raw$}, f.tweakListOptions)
}
`
var typeInformerInformer = `
func (f *$.type|private$Informer) Informer() $.cacheSharedIndexInformer|raw$ {
return f.factory.$.informerFor$(&$.type|raw${}, f.defaultInformer)
}
`
var typeInformerLister = `
func (f *$.type|private$Informer) Lister() $.lister|raw$ {
return $.newLister|raw$(f.Informer().GetIndexer())
}
`
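For reference, a hypothetical expansion of typeFilteredInformerPublicConstructor for a namespaced "Widget" type in a made-up example/v1 API group could read roughly as follows; the client set, package aliases, and method names are all illustrative.
// Hypothetical expansion for a namespaced "Widget" type; all names are made up.
func NewFilteredWidgetInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.ExampleV1().Widgets(namespace).List(options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.ExampleV1().Widgets(namespace).Watch(options)
			},
		},
		&examplev1.Widget{},
		resyncPeriod,
		indexers,
	)
}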

View File

@ -0,0 +1,352 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"fmt"
"path"
"path/filepath"
"strings"
"k8s.io/gengo/args"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/klog"
"k8s.io/code-generator/cmd/client-gen/generators/util"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
informergenargs "k8s.io/code-generator/cmd/informer-gen/args"
)
// NameSystems returns the name system used by the generators in this package.
func NameSystems() namer.NameSystems {
pluralExceptions := map[string]string{
"Endpoints": "Endpoints",
}
return namer.NameSystems{
"public": namer.NewPublicNamer(0),
"private": namer.NewPrivateNamer(0),
"raw": namer.NewRawNamer("", nil),
"publicPlural": namer.NewPublicPluralNamer(pluralExceptions),
"allLowercasePlural": namer.NewAllLowercasePluralNamer(pluralExceptions),
"lowercaseSingular": &lowercaseSingularNamer{},
}
}
// lowercaseSingularNamer implements Namer
type lowercaseSingularNamer struct{}
// Name returns t's name in all lowercase.
func (n *lowercaseSingularNamer) Name(t *types.Type) string {
return strings.ToLower(t.Name.Name)
}
// DefaultNameSystem returns the default name system for ordering the types to be
// processed by the generators in this package.
func DefaultNameSystem() string {
return "public"
}
// objectMetaForPackage returns the type of ObjectMeta used by package p.
func objectMetaForPackage(p *types.Package) (*types.Type, bool, error) {
generatingForPackage := false
for _, t := range p.Types {
if !util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient {
continue
}
generatingForPackage = true
for _, member := range t.Members {
if member.Name == "ObjectMeta" {
return member.Type, isInternal(member), nil
}
}
}
if generatingForPackage {
return nil, false, fmt.Errorf("unable to find ObjectMeta for any types in package %s", p.Path)
}
return nil, false, nil
}
// isInternal returns true if the tags for a member do not contain a json tag
func isInternal(m types.Member) bool {
return !strings.Contains(m.Tags, "json")
}
func packageForInternalInterfaces(base string) string {
return filepath.Join(base, "internalinterfaces")
}
func vendorless(p string) string {
if pos := strings.LastIndex(p, "/vendor/"); pos != -1 {
return p[pos+len("/vendor/"):]
}
return p
}
// Packages makes the informer package definitions.
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
boilerplate, err := arguments.LoadGoBoilerplate()
if err != nil {
klog.Fatalf("Failed loading boilerplate: %v", err)
}
customArgs, ok := arguments.CustomArgs.(*informergenargs.CustomArgs)
if !ok {
klog.Fatalf("Wrong CustomArgs type: %T", arguments.CustomArgs)
}
internalVersionPackagePath := filepath.Join(arguments.OutputPackagePath)
externalVersionPackagePath := filepath.Join(arguments.OutputPackagePath)
if !customArgs.SingleDirectory {
internalVersionPackagePath = filepath.Join(arguments.OutputPackagePath, "internalversion")
externalVersionPackagePath = filepath.Join(arguments.OutputPackagePath, "externalversions")
}
var packageList generator.Packages
typesForGroupVersion := make(map[clientgentypes.GroupVersion][]*types.Type)
externalGroupVersions := make(map[string]clientgentypes.GroupVersions)
internalGroupVersions := make(map[string]clientgentypes.GroupVersions)
groupGoNames := make(map[string]string)
for _, inputDir := range arguments.InputDirs {
p := context.Universe.Package(vendorless(inputDir))
objectMeta, internal, err := objectMetaForPackage(p)
if err != nil {
klog.Fatal(err)
}
if objectMeta == nil {
// no types in this package had genclient
continue
}
var gv clientgentypes.GroupVersion
var targetGroupVersions map[string]clientgentypes.GroupVersions
if internal {
lastSlash := strings.LastIndex(p.Path, "/")
if lastSlash == -1 {
klog.Fatalf("error constructing internal group version for package %q", p.Path)
}
gv.Group = clientgentypes.Group(p.Path[lastSlash+1:])
targetGroupVersions = internalGroupVersions
} else {
parts := strings.Split(p.Path, "/")
gv.Group = clientgentypes.Group(parts[len(parts)-2])
gv.Version = clientgentypes.Version(parts[len(parts)-1])
targetGroupVersions = externalGroupVersions
}
groupPackageName := gv.Group.NonEmpty()
gvPackage := path.Clean(p.Path)
// If there's a comment of the form "// +groupName=somegroup" or
// "// +groupName=somegroup.foo.bar.io", use the first field (somegroup) as the name of the
// group when generating.
if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil {
gv.Group = clientgentypes.Group(override[0])
}
// If there's a comment of the form "// +groupGoName=SomeUniqueShortName", use that as
// the Go group identifier in CamelCase. It defaults
groupGoNames[groupPackageName] = namer.IC(strings.Split(gv.Group.NonEmpty(), ".")[0])
if override := types.ExtractCommentTags("+", p.Comments)["groupGoName"]; override != nil {
groupGoNames[groupPackageName] = namer.IC(override[0])
}
var typesToGenerate []*types.Type
for _, t := range p.Types {
tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...))
if !tags.GenerateClient || tags.NoVerbs || !tags.HasVerb("list") || !tags.HasVerb("watch") {
continue
}
typesToGenerate = append(typesToGenerate, t)
if _, ok := typesForGroupVersion[gv]; !ok {
typesForGroupVersion[gv] = []*types.Type{}
}
typesForGroupVersion[gv] = append(typesForGroupVersion[gv], t)
}
if len(typesToGenerate) == 0 {
continue
}
groupVersionsEntry, ok := targetGroupVersions[groupPackageName]
if !ok {
groupVersionsEntry = clientgentypes.GroupVersions{
PackageName: groupPackageName,
Group: gv.Group,
}
}
groupVersionsEntry.Versions = append(groupVersionsEntry.Versions, clientgentypes.PackageVersion{Version: gv.Version, Package: gvPackage})
targetGroupVersions[groupPackageName] = groupVersionsEntry
orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)}
typesToGenerate = orderer.OrderTypes(typesToGenerate)
if internal {
packageList = append(packageList, versionPackage(internalVersionPackagePath, groupPackageName, gv, groupGoNames[groupPackageName], boilerplate, typesToGenerate, customArgs.InternalClientSetPackage, customArgs.ListersPackage))
} else {
packageList = append(packageList, versionPackage(externalVersionPackagePath, groupPackageName, gv, groupGoNames[groupPackageName], boilerplate, typesToGenerate, customArgs.VersionedClientSetPackage, customArgs.ListersPackage))
}
}
if len(externalGroupVersions) != 0 {
packageList = append(packageList, factoryInterfacePackage(externalVersionPackagePath, boilerplate, customArgs.VersionedClientSetPackage))
packageList = append(packageList, factoryPackage(externalVersionPackagePath, boilerplate, groupGoNames, externalGroupVersions, customArgs.VersionedClientSetPackage, typesForGroupVersion))
for _, gvs := range externalGroupVersions {
packageList = append(packageList, groupPackage(externalVersionPackagePath, gvs, boilerplate))
}
}
if len(internalGroupVersions) != 0 {
packageList = append(packageList, factoryInterfacePackage(internalVersionPackagePath, boilerplate, customArgs.InternalClientSetPackage))
packageList = append(packageList, factoryPackage(internalVersionPackagePath, boilerplate, groupGoNames, internalGroupVersions, customArgs.InternalClientSetPackage, typesForGroupVersion))
for _, gvs := range internalGroupVersions {
packageList = append(packageList, groupPackage(internalVersionPackagePath, gvs, boilerplate))
}
}
return packageList
}
func factoryPackage(basePackage string, boilerplate []byte, groupGoNames map[string]string, groupVersions map[string]clientgentypes.GroupVersions, clientSetPackage string, typesForGroupVersion map[clientgentypes.GroupVersion][]*types.Type) generator.Package {
return &generator.DefaultPackage{
PackageName: filepath.Base(basePackage),
PackagePath: basePackage,
HeaderText: boilerplate,
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
generators = append(generators, &factoryGenerator{
DefaultGen: generator.DefaultGen{
OptionalName: "factory",
},
outputPackage: basePackage,
imports: generator.NewImportTracker(),
groupVersions: groupVersions,
clientSetPackage: clientSetPackage,
internalInterfacesPackage: packageForInternalInterfaces(basePackage),
gvGoNames: groupGoNames,
})
generators = append(generators, &genericGenerator{
DefaultGen: generator.DefaultGen{
OptionalName: "generic",
},
outputPackage: basePackage,
imports: generator.NewImportTracker(),
groupVersions: groupVersions,
typesForGroupVersion: typesForGroupVersion,
groupGoNames: groupGoNames,
})
return generators
},
}
}
func factoryInterfacePackage(basePackage string, boilerplate []byte, clientSetPackage string) generator.Package {
packagePath := packageForInternalInterfaces(basePackage)
return &generator.DefaultPackage{
PackageName: filepath.Base(packagePath),
PackagePath: packagePath,
HeaderText: boilerplate,
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
generators = append(generators, &factoryInterfaceGenerator{
DefaultGen: generator.DefaultGen{
OptionalName: "factory_interfaces",
},
outputPackage: packagePath,
imports: generator.NewImportTracker(),
clientSetPackage: clientSetPackage,
})
return generators
},
}
}
func groupPackage(basePackage string, groupVersions clientgentypes.GroupVersions, boilerplate []byte) generator.Package {
packagePath := filepath.Join(basePackage, groupVersions.PackageName)
groupPkgName := strings.Split(string(groupVersions.Group), ".")[0]
return &generator.DefaultPackage{
PackageName: groupPkgName,
PackagePath: packagePath,
HeaderText: boilerplate,
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
generators = append(generators, &groupInterfaceGenerator{
DefaultGen: generator.DefaultGen{
OptionalName: "interface",
},
outputPackage: packagePath,
groupVersions: groupVersions,
imports: generator.NewImportTracker(),
internalInterfacesPackage: packageForInternalInterfaces(basePackage),
})
return generators
},
FilterFunc: func(c *generator.Context, t *types.Type) bool {
tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...))
return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("watch")
},
}
}
func versionPackage(basePackage string, groupPkgName string, gv clientgentypes.GroupVersion, groupGoName string, boilerplate []byte, typesToGenerate []*types.Type, clientSetPackage, listersPackage string) generator.Package {
packagePath := filepath.Join(basePackage, groupPkgName, strings.ToLower(gv.Version.NonEmpty()))
return &generator.DefaultPackage{
PackageName: strings.ToLower(gv.Version.NonEmpty()),
PackagePath: packagePath,
HeaderText: boilerplate,
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
generators = append(generators, &versionInterfaceGenerator{
DefaultGen: generator.DefaultGen{
OptionalName: "interface",
},
outputPackage: packagePath,
imports: generator.NewImportTracker(),
types: typesToGenerate,
internalInterfacesPackage: packageForInternalInterfaces(basePackage),
})
for _, t := range typesToGenerate {
generators = append(generators, &informerGenerator{
DefaultGen: generator.DefaultGen{
OptionalName: strings.ToLower(t.Name.Name),
},
outputPackage: packagePath,
groupPkgName: groupPkgName,
groupVersion: gv,
groupGoName: groupGoName,
typeToGenerate: t,
imports: generator.NewImportTracker(),
clientSetPackage: clientSetPackage,
listersPackage: listersPackage,
internalInterfacesPackage: packageForInternalInterfaces(basePackage),
})
}
return generators
},
FilterFunc: func(c *generator.Context, t *types.Type) bool {
tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...))
return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("watch")
},
}
}
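As a quick worked example of the group/version derivation in Packages above: for external (versioned) packages the last two path segments of the input directory become the group and version. A minimal standalone sketch, with an illustrative input path:
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors the versioned branch above: the last two segments become group and version.
	parts := strings.Split("k8s.io/api/apps/v1", "/")
	fmt.Println(parts[len(parts)-2], parts[len(parts)-1]) // apps v1
}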

View File

@ -0,0 +1,33 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"k8s.io/gengo/types"
"k8s.io/klog"
)
// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if
// it exists, the value is boolean. If the tag did not exist, it returns
// false.
func extractBoolTagOrDie(key string, lines []string) bool {
val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines)
if err != nil {
klog.Fatal(err)
}
return val
}
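A minimal sketch of the comment-tag form this helper reads; the tag name "nonNamespaced" below is only an example, and the snippet assumes the k8s.io/gengo/types API as vendored here.
package main

import (
	"fmt"

	"k8s.io/gengo/types"
)

func main() {
	// A comment-tag of the form "+key=value" on a type's doc comment.
	lines := []string{"+nonNamespaced=true"}
	val, err := types.ExtractSingleBoolCommentTag("+", "nonNamespaced", false, lines)
	fmt.Println(val, err) // true <nil>
}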

View File

@ -0,0 +1,42 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import "k8s.io/gengo/types"
var (
apiScheme = types.Name{Package: "k8s.io/kubernetes/pkg/api/legacyscheme", Name: "Scheme"}
cacheGenericLister = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "GenericLister"}
cacheIndexers = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "Indexers"}
cacheListWatch = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "ListWatch"}
cacheMetaNamespaceIndexFunc = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "MetaNamespaceIndexFunc"}
cacheNamespaceIndex = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "NamespaceIndex"}
cacheNewGenericLister = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "NewGenericLister"}
cacheNewSharedIndexInformer = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "NewSharedIndexInformer"}
cacheSharedIndexInformer = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "SharedIndexInformer"}
listOptions = types.Name{Package: "k8s.io/kubernetes/pkg/apis/core", Name: "ListOptions"}
reflectType = types.Name{Package: "reflect", Name: "Type"}
runtimeObject = types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "Object"}
schemaGroupResource = types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupResource"}
schemaGroupVersionResource = types.Name{Package: "k8s.io/apimachinery/pkg/runtime/schema", Name: "GroupVersionResource"}
syncMutex = types.Name{Package: "sync", Name: "Mutex"}
timeDuration = types.Name{Package: "time", Name: "Duration"}
v1ListOptions = types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}
metav1NamespaceAll = types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "NamespaceAll"}
metav1Object = types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "Object"}
watchInterface = types.Name{Package: "k8s.io/apimachinery/pkg/watch", Name: "Interface"}
)

View File

@ -0,0 +1,109 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"io"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/code-generator/cmd/client-gen/generators/util"
)
// versionInterfaceGenerator generates the per-version interface file.
type versionInterfaceGenerator struct {
generator.DefaultGen
outputPackage string
imports namer.ImportTracker
types []*types.Type
filtered bool
internalInterfacesPackage string
}
var _ generator.Generator = &versionInterfaceGenerator{}
func (g *versionInterfaceGenerator) Filter(c *generator.Context, t *types.Type) bool {
if !g.filtered {
g.filtered = true
return true
}
return false
}
func (g *versionInterfaceGenerator) Namers(c *generator.Context) namer.NameSystems {
return namer.NameSystems{
"raw": namer.NewRawNamer(g.outputPackage, g.imports),
}
}
func (g *versionInterfaceGenerator) Imports(c *generator.Context) (imports []string) {
imports = append(imports, g.imports.ImportLines()...)
return
}
func (g *versionInterfaceGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "$", "$")
m := map[string]interface{}{
"interfacesTweakListOptionsFunc": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "TweakListOptionsFunc"}),
"interfacesSharedInformerFactory": c.Universe.Type(types.Name{Package: g.internalInterfacesPackage, Name: "SharedInformerFactory"}),
"types": g.types,
}
sw.Do(versionTemplate, m)
for _, typeDef := range g.types {
tags, err := util.ParseClientGenTags(typeDef.SecondClosestCommentLines)
if err != nil {
return err
}
m["namespaced"] = !tags.NonNamespaced
m["type"] = typeDef
sw.Do(versionFuncTemplate, m)
}
return sw.Error()
}
var versionTemplate = `
// Interface provides access to all the informers in this group version.
type Interface interface {
$range .types -$
// $.|publicPlural$ returns a $.|public$Informer.
$.|publicPlural$() $.|public$Informer
$end$
}
type version struct {
factory $.interfacesSharedInformerFactory|raw$
namespace string
tweakListOptions $.interfacesTweakListOptionsFunc|raw$
}
// New returns a new Interface.
func New(f $.interfacesSharedInformerFactory|raw$, namespace string, tweakListOptions $.interfacesTweakListOptionsFunc|raw$) Interface {
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
`
var versionFuncTemplate = `
// $.type|publicPlural$ returns a $.type|public$Informer.
func (v *version) $.type|publicPlural$() $.type|public$Informer {
return &$.type|private$Informer{factory: v.factory$if .namespaced$, namespace: v.namespace$end$, tweakListOptions: v.tweakListOptions}
}
`
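To make the two templates concrete, here is a hypothetical rendering for a version containing a single namespaced type named Widget; all identifiers are made up.
// Hypothetical expansion of versionTemplate and versionFuncTemplate for one type.
type Interface interface {
	// Widgets returns a WidgetInformer.
	Widgets() WidgetInformer
}
type version struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// Widgets returns a WidgetInformer.
func (v *version) Widgets() WidgetInformer {
	return &widgetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}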

63
vendor/k8s.io/code-generator/cmd/informer-gen/main.go generated vendored Normal file
View File

@ -0,0 +1,63 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"path/filepath"
"github.com/spf13/pflag"
"k8s.io/code-generator/cmd/informer-gen/generators"
"k8s.io/code-generator/pkg/util"
"k8s.io/gengo/args"
"k8s.io/klog"
generatorargs "k8s.io/code-generator/cmd/informer-gen/args"
)
func main() {
klog.InitFlags(nil)
genericArgs, customArgs := generatorargs.NewDefaults()
// Override defaults.
// TODO: move out of informer-gen
genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath())
genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/informers/informers_generated"
customArgs.VersionedClientSetPackage = "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
customArgs.InternalClientSetPackage = "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
customArgs.ListersPackage = "k8s.io/kubernetes/pkg/client/listers"
genericArgs.AddFlags(pflag.CommandLine)
customArgs.AddFlags(pflag.CommandLine)
flag.Set("logtostderr", "true")
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
if err := generatorargs.Validate(genericArgs); err != nil {
klog.Fatalf("Error: %v", err)
}
// Run it.
if err := genericArgs.Execute(
generators.NameSystems(),
generators.DefaultNameSystem(),
generators.Packages,
); err != nil {
klog.Fatalf("Error: %v", err)
}
klog.V(2).Info("Completed successfully.")
}

View File

@ -0,0 +1 @@
{}

View File

@ -0,0 +1,56 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package args
import (
"fmt"
"path"
"github.com/spf13/pflag"
codegenutil "k8s.io/code-generator/pkg/util"
"k8s.io/gengo/args"
)
// CustomArgs is used by the gengo framework to pass args specific to this generator.
type CustomArgs struct{}
// NewDefaults returns default arguments for the generator.
func NewDefaults() (*args.GeneratorArgs, *CustomArgs) {
genericArgs := args.Default().WithoutDefaultFlagParsing()
customArgs := &CustomArgs{}
genericArgs.CustomArgs = customArgs
if pkg := codegenutil.CurrentPackage(); len(pkg) != 0 {
genericArgs.OutputPackagePath = path.Join(pkg, "pkg/client/listers")
}
return genericArgs, customArgs
}
// AddFlags adds the generator flags to the flag set.
func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) {}
// Validate checks the given arguments.
func Validate(genericArgs *args.GeneratorArgs) error {
_ = genericArgs.CustomArgs.(*CustomArgs)
if len(genericArgs.OutputPackagePath) == 0 {
return fmt.Errorf("output package cannot be empty")
}
return nil
}

View File

@ -0,0 +1,67 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"io"
"os"
"path/filepath"
"strings"
"k8s.io/gengo/generator"
"k8s.io/gengo/types"
"k8s.io/code-generator/cmd/client-gen/generators/util"
)
// expansionGenerator produces a file of expansion interfaces.
type expansionGenerator struct {
generator.DefaultGen
packagePath string
types []*types.Type
}
// We only want to call GenerateType() once per group.
func (g *expansionGenerator) Filter(c *generator.Context, t *types.Type) bool {
return t == g.types[0]
}
func (g *expansionGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "$", "$")
for _, t := range g.types {
tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...))
if _, err := os.Stat(filepath.Join(g.packagePath, strings.ToLower(t.Name.Name+"_expansion.go"))); os.IsNotExist(err) {
sw.Do(expansionInterfaceTemplate, t)
if !tags.NonNamespaced {
sw.Do(namespacedExpansionInterfaceTemplate, t)
}
}
}
return sw.Error()
}
var expansionInterfaceTemplate = `
// $.|public$ListerExpansion allows custom methods to be added to
// $.|public$Lister.
type $.|public$ListerExpansion interface {}
`
var namespacedExpansionInterfaceTemplate = `
// $.|public$NamespaceListerExpansion allows custom methods to be added to
// $.|public$NamespaceLister.
type $.|public$NamespaceListerExpansion interface {}
`

View File

@ -0,0 +1,371 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"fmt"
"io"
"path/filepath"
"strings"
"k8s.io/gengo/args"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/code-generator/cmd/client-gen/generators/util"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
"k8s.io/klog"
)
// NameSystems returns the name system used by the generators in this package.
func NameSystems() namer.NameSystems {
pluralExceptions := map[string]string{
"Endpoints": "Endpoints",
}
return namer.NameSystems{
"public": namer.NewPublicNamer(0),
"private": namer.NewPrivateNamer(0),
"raw": namer.NewRawNamer("", nil),
"publicPlural": namer.NewPublicPluralNamer(pluralExceptions),
"allLowercasePlural": namer.NewAllLowercasePluralNamer(pluralExceptions),
"lowercaseSingular": &lowercaseSingularNamer{},
}
}
// lowercaseSingularNamer implements Namer
type lowercaseSingularNamer struct{}
// Name returns t's name in all lowercase.
func (n *lowercaseSingularNamer) Name(t *types.Type) string {
return strings.ToLower(t.Name.Name)
}
// DefaultNameSystem returns the default name system for ordering the types to be
// processed by the generators in this package.
func DefaultNameSystem() string {
return "public"
}
// Packages makes the lister package definitions.
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
boilerplate, err := arguments.LoadGoBoilerplate()
if err != nil {
klog.Fatalf("Failed loading boilerplate: %v", err)
}
var packageList generator.Packages
for _, inputDir := range arguments.InputDirs {
p := context.Universe.Package(inputDir)
objectMeta, internal, err := objectMetaForPackage(p)
if err != nil {
klog.Fatal(err)
}
if objectMeta == nil {
// no types in this package had genclient
continue
}
var gv clientgentypes.GroupVersion
var internalGVPkg string
if internal {
lastSlash := strings.LastIndex(p.Path, "/")
if lastSlash == -1 {
klog.Fatalf("error constructing internal group version for package %q", p.Path)
}
gv.Group = clientgentypes.Group(p.Path[lastSlash+1:])
internalGVPkg = p.Path
} else {
parts := strings.Split(p.Path, "/")
gv.Group = clientgentypes.Group(parts[len(parts)-2])
gv.Version = clientgentypes.Version(parts[len(parts)-1])
internalGVPkg = strings.Join(parts[0:len(parts)-1], "/")
}
groupPackageName := strings.ToLower(gv.Group.NonEmpty())
// If there's a comment of the form "// +groupName=somegroup" or
// "// +groupName=somegroup.foo.bar.io", use the first field (somegroup) as the name of the
// group when generating.
if override := types.ExtractCommentTags("+", p.Comments)["groupName"]; override != nil {
gv.Group = clientgentypes.Group(strings.SplitN(override[0], ".", 2)[0])
}
var typesToGenerate []*types.Type
for _, t := range p.Types {
tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...))
if !tags.GenerateClient || !tags.HasVerb("list") || !tags.HasVerb("get") {
continue
}
typesToGenerate = append(typesToGenerate, t)
}
if len(typesToGenerate) == 0 {
continue
}
orderer := namer.Orderer{Namer: namer.NewPrivateNamer(0)}
typesToGenerate = orderer.OrderTypes(typesToGenerate)
packagePath := filepath.Join(arguments.OutputPackagePath, groupPackageName, strings.ToLower(gv.Version.NonEmpty()))
packageList = append(packageList, &generator.DefaultPackage{
PackageName: strings.ToLower(gv.Version.NonEmpty()),
PackagePath: packagePath,
HeaderText: boilerplate,
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
generators = append(generators, &expansionGenerator{
DefaultGen: generator.DefaultGen{
OptionalName: "expansion_generated",
},
packagePath: filepath.Join(arguments.OutputBase, packagePath),
types: typesToGenerate,
})
for _, t := range typesToGenerate {
generators = append(generators, &listerGenerator{
DefaultGen: generator.DefaultGen{
OptionalName: strings.ToLower(t.Name.Name),
},
outputPackage: arguments.OutputPackagePath,
groupVersion: gv,
internalGVPkg: internalGVPkg,
typeToGenerate: t,
imports: generator.NewImportTracker(),
objectMeta: objectMeta,
})
}
return generators
},
FilterFunc: func(c *generator.Context, t *types.Type) bool {
tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...))
return tags.GenerateClient && tags.HasVerb("list") && tags.HasVerb("get")
},
})
}
return packageList
}
// objectMetaForPackage returns the type of ObjectMeta used by package p.
func objectMetaForPackage(p *types.Package) (*types.Type, bool, error) {
generatingForPackage := false
for _, t := range p.Types {
// filter out types which don't have genclient.
if !util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient {
continue
}
generatingForPackage = true
for _, member := range t.Members {
if member.Name == "ObjectMeta" {
return member.Type, isInternal(member), nil
}
}
}
if generatingForPackage {
return nil, false, fmt.Errorf("unable to find ObjectMeta for any types in package %s", p.Path)
}
return nil, false, nil
}
// isInternal returns true if the tags for a member do not contain a json tag
func isInternal(m types.Member) bool {
return !strings.Contains(m.Tags, "json")
}
// listerGenerator produces a file of listers for a given GroupVersion and
// type.
type listerGenerator struct {
generator.DefaultGen
outputPackage string
groupVersion clientgentypes.GroupVersion
internalGVPkg string
typeToGenerate *types.Type
imports namer.ImportTracker
objectMeta *types.Type
}
var _ generator.Generator = &listerGenerator{}
func (g *listerGenerator) Filter(c *generator.Context, t *types.Type) bool {
return t == g.typeToGenerate
}
func (g *listerGenerator) Namers(c *generator.Context) namer.NameSystems {
return namer.NameSystems{
"raw": namer.NewRawNamer(g.outputPackage, g.imports),
}
}
func (g *listerGenerator) Imports(c *generator.Context) (imports []string) {
imports = append(imports, g.imports.ImportLines()...)
imports = append(imports, "k8s.io/apimachinery/pkg/api/errors")
imports = append(imports, "k8s.io/apimachinery/pkg/labels")
// for Indexer
imports = append(imports, "k8s.io/client-go/tools/cache")
return
}
func (g *listerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "$", "$")
klog.V(5).Infof("processing type %v", t)
m := map[string]interface{}{
"Resource": c.Universe.Function(types.Name{Package: t.Name.Package, Name: "Resource"}),
"type": t,
"objectMeta": g.objectMeta,
}
tags, err := util.ParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...))
if err != nil {
return err
}
if tags.NonNamespaced {
sw.Do(typeListerInterface_NonNamespaced, m)
} else {
sw.Do(typeListerInterface, m)
}
sw.Do(typeListerStruct, m)
sw.Do(typeListerConstructor, m)
sw.Do(typeLister_List, m)
if tags.NonNamespaced {
sw.Do(typeLister_NonNamespacedGet, m)
return sw.Error()
}
sw.Do(typeLister_NamespaceLister, m)
sw.Do(namespaceListerInterface, m)
sw.Do(namespaceListerStruct, m)
sw.Do(namespaceLister_List, m)
sw.Do(namespaceLister_Get, m)
return sw.Error()
}
var typeListerInterface = `
// $.type|public$Lister helps list $.type|publicPlural$.
type $.type|public$Lister interface {
// List lists all $.type|publicPlural$ in the indexer.
List(selector labels.Selector) (ret []*$.type|raw$, err error)
// $.type|publicPlural$ returns an object that can list and get $.type|publicPlural$.
$.type|publicPlural$(namespace string) $.type|public$NamespaceLister
$.type|public$ListerExpansion
}
`
var typeListerInterface_NonNamespaced = `
// $.type|public$Lister helps list $.type|publicPlural$.
type $.type|public$Lister interface {
// List lists all $.type|publicPlural$ in the indexer.
List(selector labels.Selector) (ret []*$.type|raw$, err error)
// Get retrieves the $.type|public$ from the index for a given name.
Get(name string) (*$.type|raw$, error)
$.type|public$ListerExpansion
}
`
var typeListerStruct = `
// $.type|private$Lister implements the $.type|public$Lister interface.
type $.type|private$Lister struct {
indexer cache.Indexer
}
`
var typeListerConstructor = `
// New$.type|public$Lister returns a new $.type|public$Lister.
func New$.type|public$Lister(indexer cache.Indexer) $.type|public$Lister {
return &$.type|private$Lister{indexer: indexer}
}
`
var typeLister_List = `
// List lists all $.type|publicPlural$ in the indexer.
func (s *$.type|private$Lister) List(selector labels.Selector) (ret []*$.type|raw$, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*$.type|raw$))
})
return ret, err
}
`
var typeLister_NamespaceLister = `
// $.type|publicPlural$ returns an object that can list and get $.type|publicPlural$.
func (s *$.type|private$Lister) $.type|publicPlural$(namespace string) $.type|public$NamespaceLister {
return $.type|private$NamespaceLister{indexer: s.indexer, namespace: namespace}
}
`
var typeLister_NonNamespacedGet = `
// Get retrieves the $.type|public$ from the index for a given name.
func (s *$.type|private$Lister) Get(name string) (*$.type|raw$, error) {
obj, exists, err := s.indexer.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound($.Resource|raw$("$.type|lowercaseSingular$"), name)
}
return obj.(*$.type|raw$), nil
}
`
var namespaceListerInterface = `
// $.type|public$NamespaceLister helps list and get $.type|publicPlural$.
type $.type|public$NamespaceLister interface {
// List lists all $.type|publicPlural$ in the indexer for a given namespace.
List(selector labels.Selector) (ret []*$.type|raw$, err error)
// Get retrieves the $.type|public$ from the indexer for a given namespace and name.
Get(name string) (*$.type|raw$, error)
$.type|public$NamespaceListerExpansion
}
`
var namespaceListerStruct = `
// $.type|private$NamespaceLister implements the $.type|public$NamespaceLister
// interface.
type $.type|private$NamespaceLister struct {
indexer cache.Indexer
namespace string
}
`
var namespaceLister_List = `
// List lists all $.type|publicPlural$ in the indexer for a given namespace.
func (s $.type|private$NamespaceLister) List(selector labels.Selector) (ret []*$.type|raw$, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*$.type|raw$))
})
return ret, err
}
`
var namespaceLister_Get = `
// Get retrieves the $.type|public$ from the indexer for a given namespace and name.
func (s $.type|private$NamespaceLister) Get(name string) (*$.type|raw$, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound($.Resource|raw$("$.type|lowercaseSingular$"), name)
}
return obj.(*$.type|raw$), nil
}
`
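As an illustration, a hypothetical expansion of typeLister_NamespaceLister and namespaceLister_Get for a made-up "Widget" type might read as follows; all identifiers are illustrative.
// Hypothetical expansion for a namespaced "Widget" type.
func (s *widgetLister) Widgets(namespace string) WidgetNamespaceLister {
	return widgetNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// Get retrieves the Widget from the indexer for a given namespace and name.
func (s widgetNamespaceLister) Get(name string) (*v1.Widget, error) {
	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(v1.Resource("widget"), name)
	}
	return obj.(*v1.Widget), nil
}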

View File

@ -0,0 +1,33 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"k8s.io/gengo/types"
"k8s.io/klog"
)
// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if
// it exists, the value is boolean. If the tag did not exist, it returns
// false.
func extractBoolTagOrDie(key string, lines []string) bool {
val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines)
if err != nil {
klog.Fatal(err)
}
return val
}

60
vendor/k8s.io/code-generator/cmd/lister-gen/main.go generated vendored Normal file
View File

@ -0,0 +1,60 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"path/filepath"
"github.com/spf13/pflag"
"k8s.io/code-generator/cmd/lister-gen/generators"
"k8s.io/code-generator/pkg/util"
"k8s.io/gengo/args"
"k8s.io/klog"
generatorargs "k8s.io/code-generator/cmd/lister-gen/args"
)
func main() {
klog.InitFlags(nil)
genericArgs, customArgs := generatorargs.NewDefaults()
// Override defaults.
// TODO: move this out of lister-gen
genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath())
genericArgs.OutputPackagePath = "k8s.io/kubernetes/pkg/client/listers"
genericArgs.AddFlags(pflag.CommandLine)
customArgs.AddFlags(pflag.CommandLine)
flag.Set("logtostderr", "true")
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
if err := generatorargs.Validate(genericArgs); err != nil {
klog.Fatalf("Error: %v", err)
}
// Run it.
if err := genericArgs.Execute(
generators.NameSystems(),
generators.DefaultNameSystem(),
generators.Packages,
); err != nil {
klog.Fatalf("Error: %v", err)
}
klog.V(2).Info("Completed successfully.")
}

58
vendor/k8s.io/code-generator/pkg/namer/tag-override.go generated vendored Normal file
View File

@ -0,0 +1,58 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package namer
import (
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
)
// TagOverrideNamer is a namer which pulls names from a given tag, if specified,
// and otherwise falls back to a different namer.
type TagOverrideNamer struct {
tagName string
fallback namer.Namer
}
// Name returns the tag value if it exists. If no tag was found, the fallback namer will be used.
func (n *TagOverrideNamer) Name(t *types.Type) string {
if nameOverride := extractTag(n.tagName, append(t.SecondClosestCommentLines, t.CommentLines...)); nameOverride != "" {
return nameOverride
}
return n.fallback.Name(t)
}
// NewTagOverrideNamer creates a namer.Namer which uses the contents of the given tag as
// the name, or falls back to another Namer if the tag is not present.
func NewTagOverrideNamer(tagName string, fallback namer.Namer) namer.Namer {
return &TagOverrideNamer{
tagName: tagName,
fallback: fallback,
}
}
// extractTag gets the comment-tags for the key. If the tag did not exist, it
// returns the empty string.
func extractTag(key string, lines []string) string {
val, present := types.ExtractCommentTags("+", lines)[key]
if !present || len(val) < 1 {
return ""
}
return val[0]
}
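A minimal usage sketch, assuming the illustrative tag name "myTag" and the gengo namer/types APIs as vendored here; since the tag is present on the type, the fallback namer is never consulted.
package main

import (
	"fmt"

	codegennamer "k8s.io/code-generator/pkg/namer"
	"k8s.io/gengo/namer"
	"k8s.io/gengo/types"
)

func main() {
	// Wrap the public namer so a "+myTag=SpecialName" comment overrides the name.
	n := codegennamer.NewTagOverrideNamer("myTag", namer.NewPublicNamer(0))
	t := &types.Type{
		Name:         types.Name{Package: "example.com/pkg", Name: "Widget"},
		CommentLines: []string{"+myTag=SpecialName"},
	}
	fmt.Println(n.Name(t)) // SpecialName
}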

61
vendor/k8s.io/code-generator/pkg/util/build.go generated vendored Normal file
View File

@ -0,0 +1,61 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
gobuild "go/build"
"path"
"path/filepath"
"reflect"
"strings"
)
type empty struct{}
// CurrentPackage returns the go package of the current directory, or "" if it cannot
// be derived from the GOPATH.
func CurrentPackage() string {
for _, root := range gobuild.Default.SrcDirs() {
if pkg, ok := hasSubdir(root, "."); ok {
return pkg
}
}
return ""
}
func hasSubdir(root, dir string) (rel string, ok bool) {
// ensure a trailing separator to properly compare on word-boundaries
const sep = string(filepath.Separator)
root = filepath.Clean(root)
if !strings.HasSuffix(root, sep) {
root += sep
}
// check whether dir starts with root
dir = filepath.Clean(dir)
if !strings.HasPrefix(dir, root) {
return "", false
}
// cut off root
return filepath.ToSlash(dir[len(root):]), true
}
// BoilerplatePath uses the boilerplate in code-generator by calculating the relative path to it.
func BoilerplatePath() string {
return path.Join(reflect.TypeOf(empty{}).PkgPath(), "/../../hack/boilerplate.go.txt")
}

202
vendor/k8s.io/gengo/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

199
vendor/k8s.io/gengo/args/args.go generated vendored Normal file
View File

@ -0,0 +1,199 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package args has common command-line flags for generation programs.
package args
import (
"bytes"
goflag "flag"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/parser"
"k8s.io/gengo/types"
"github.com/spf13/pflag"
)
// Default returns a defaulted GeneratorArgs. You may change the defaults
// before calling AddFlags.
func Default() *GeneratorArgs {
return &GeneratorArgs{
OutputBase: DefaultSourceTree(),
GoHeaderFilePath: filepath.Join(DefaultSourceTree(), "k8s.io/gengo/boilerplate/boilerplate.go.txt"),
GeneratedBuildTag: "ignore_autogenerated",
GeneratedByCommentTemplate: "// Code generated by GENERATOR_NAME. DO NOT EDIT.",
defaultCommandLineFlags: true,
}
}
// GeneratorArgs has arguments that are passed to generators.
type GeneratorArgs struct {
// Which directories to parse.
InputDirs []string
// Source tree to write results to.
OutputBase string
// Package path within the source tree.
OutputPackagePath string
// Output file name.
OutputFileBaseName string
// Where to get copyright header text.
GoHeaderFilePath string
// If GeneratedByCommentTemplate is set, generate a "Code generated by" comment
// below the boilerplate, of the format defined by this string.
// Any instances of "GENERATOR_NAME" will be replaced with the name of the code generator.
GeneratedByCommentTemplate string
// If true, only verify, don't write anything.
VerifyOnly bool
// GeneratedBuildTag is the tag used to identify code generated by execution
// of this type. Each generator should use a different tag, and different
// groups of generators (external API that depends on Kube generations) should
// keep tags distinct as well.
GeneratedBuildTag string
// Any custom arguments go here
CustomArgs interface{}
// Whether to use default command line flags
defaultCommandLineFlags bool
}
// WithoutDefaultFlagParsing disables implicit addition of command line flags and parsing.
func (g *GeneratorArgs) WithoutDefaultFlagParsing() *GeneratorArgs {
g.defaultCommandLineFlags = false
return g
}
func (g *GeneratorArgs) AddFlags(fs *pflag.FlagSet) {
fs.StringSliceVarP(&g.InputDirs, "input-dirs", "i", g.InputDirs, "Comma-separated list of import paths to get input types from.")
fs.StringVarP(&g.OutputBase, "output-base", "o", g.OutputBase, "Output base; defaults to $GOPATH/src/ or ./ if $GOPATH is not set.")
fs.StringVarP(&g.OutputPackagePath, "output-package", "p", g.OutputPackagePath, "Base package path.")
fs.StringVarP(&g.OutputFileBaseName, "output-file-base", "O", g.OutputFileBaseName, "Base name (without .go suffix) for output files.")
fs.StringVarP(&g.GoHeaderFilePath, "go-header-file", "h", g.GoHeaderFilePath, "File containing boilerplate header text. The string YEAR will be replaced with the current 4-digit year.")
fs.BoolVar(&g.VerifyOnly, "verify-only", g.VerifyOnly, "If true, only verify existing output, do not write anything.")
fs.StringVar(&g.GeneratedBuildTag, "build-tag", g.GeneratedBuildTag, "A Go build tag to use to identify files generated by this command. Should be unique.")
}
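// Illustrative invocation (a sketch, not part of the vendored source) using the
// flags registered in AddFlags above; the binary name and paths are
// hypothetical placeholders:
//
//	deepcopy-gen \
//	  --input-dirs k8s.io/sample/pkg/apis/widgets/v1alpha1 \
//	  --output-file-base zz_generated.deepcopy \
//	  --go-header-file hack/boilerplate.go.txt \
//	  --build-tag ignore_autogenerated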
// LoadGoBoilerplate loads the boilerplate file passed to --go-header-file.
func (g *GeneratorArgs) LoadGoBoilerplate() ([]byte, error) {
b, err := ioutil.ReadFile(g.GoHeaderFilePath)
if err != nil {
return nil, err
}
b = bytes.Replace(b, []byte("YEAR"), []byte(strconv.Itoa(time.Now().Year())), -1)
if g.GeneratedByCommentTemplate != "" {
if len(b) != 0 {
b = append(b, byte('\n'))
}
generatorName := path.Base(os.Args[0])
generatedByComment := strings.Replace(g.GeneratedByCommentTemplate, "GENERATOR_NAME", generatorName, -1)
s := fmt.Sprintf("%s\n\n", generatedByComment)
b = append(b, []byte(s)...)
}
return b, nil
}
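// For example (illustrative, not part of the vendored source): given a
// --go-header-file containing
//
//	/*
//	Copyright YEAR The Example Authors.
//	*/
//
// and the default GeneratedByCommentTemplate, LoadGoBoilerplate returns that
// header with YEAR replaced by the current year, followed by a line such as
//
//	// Code generated by deepcopy-gen. DO NOT EDIT.
//
// where the generator name comes from path.Base(os.Args[0]).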
// NewBuilder makes a new parser.Builder and populates it with the input
// directories.
func (g *GeneratorArgs) NewBuilder() (*parser.Builder, error) {
b := parser.New()
// Ignore all auto-generated files.
b.AddBuildTags(g.GeneratedBuildTag)
for _, d := range g.InputDirs {
var err error
if strings.HasSuffix(d, "/...") {
err = b.AddDirRecursive(strings.TrimSuffix(d, "/..."))
} else {
err = b.AddDir(d)
}
if err != nil {
return nil, fmt.Errorf("unable to add directory %q: %v", d, err)
}
}
return b, nil
}
// InputIncludes returns true if the given package is a (sub) package of one of
// the InputDirs.
func (g *GeneratorArgs) InputIncludes(p *types.Package) bool {
for _, dir := range g.InputDirs {
d := dir
if strings.HasSuffix(d, "...") {
d = strings.TrimSuffix(d, "...")
}
if strings.HasPrefix(p.Path, d) {
return true
}
}
return false
}
// DefaultSourceTree returns the /src directory of the first entry in $GOPATH.
// If $GOPATH is empty, it returns "./". Useful as a default output location.
func DefaultSourceTree() string {
paths := strings.Split(os.Getenv("GOPATH"), string(filepath.ListSeparator))
if len(paths) > 0 && len(paths[0]) > 0 {
return filepath.Join(paths[0], "src")
}
return "./"
}
// Execute implements main().
// If you don't need any non-default behavior, use as:
// args.Default().Execute(...)
func (g *GeneratorArgs) Execute(nameSystems namer.NameSystems, defaultSystem string, pkgs func(*generator.Context, *GeneratorArgs) generator.Packages) error {
if g.defaultCommandLineFlags {
g.AddFlags(pflag.CommandLine)
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
pflag.Parse()
}
b, err := g.NewBuilder()
if err != nil {
return fmt.Errorf("Failed making a parser: %v", err)
}
c, err := generator.NewContext(b, nameSystems, defaultSystem)
if err != nil {
return fmt.Errorf("Failed making a context: %v", err)
}
c.Verify = g.VerifyOnly
packages := pkgs(c, g)
if err := c.ExecutePackages(g.OutputBase, packages); err != nil {
return fmt.Errorf("Failed executing generator: %v", err)
}
return nil
}
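Taken together, a generator binary wires this package to a concrete set of generators in its main function. The following is a minimal sketch of that wiring (illustrative, not part of this commit); it assumes the deepcopy generators that appear later in this diff are importable as k8s.io/gengo/examples/deepcopy-gen/generators and uses only the Default, Execute, NameSystems, DefaultNameSystem, and Packages functions shown in this vendor drop.

package main

import (
	"k8s.io/gengo/args"
	"k8s.io/gengo/examples/deepcopy-gen/generators"

	"k8s.io/klog"
)

func main() {
	// Start from the defaults and override what this generator needs before
	// Execute registers and parses the command-line flags.
	arguments := args.Default()
	arguments.OutputFileBaseName = "zz_generated.deepcopy"

	// Execute builds the parser and generation context, then runs the
	// packages returned by generators.Packages.
	if err := arguments.Execute(
		generators.NameSystems(),
		generators.DefaultNameSystem(),
		generators.Packages,
	); err != nil {
		klog.Fatalf("Error: %v", err)
	}
}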

View File

@ -0,0 +1,905 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generators
import (
"fmt"
"io"
"path/filepath"
"sort"
"strings"
"k8s.io/gengo/args"
"k8s.io/gengo/examples/set-gen/sets"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/klog"
)
// CustomArgs is used by the go2idl framework to pass args specific to this
// generator.
type CustomArgs struct {
BoundingDirs []string // Only deal with types rooted under these dirs.
}
// This is the comment tag that carries parameters for deep-copy generation.
const (
tagName = "k8s:deepcopy-gen"
interfacesTagName = tagName + ":interfaces"
interfacesNonPointerTagName = tagName + ":nonpointer-interfaces" // attach the DeepCopy<Interface> methods to the value receiver T rather than to the pointer receiver *T
)
// Known values for the comment tag.
const tagValuePackage = "package"
// tagValue holds parameters from a tagName tag.
type tagValue struct {
value string
register bool
}
func extractTag(comments []string) *tagValue {
tagVals := types.ExtractCommentTags("+", comments)[tagName]
if tagVals == nil {
// No match for the tag.
return nil
}
// If there are multiple values, abort.
if len(tagVals) > 1 {
klog.Fatalf("Found %d %s tags: %q", len(tagVals), tagName, tagVals)
}
// If we got here we are returning something.
tag := &tagValue{}
// Get the primary value.
parts := strings.Split(tagVals[0], ",")
if len(parts) >= 1 {
tag.value = parts[0]
}
// Parse extra arguments.
parts = parts[1:]
for i := range parts {
kv := strings.SplitN(parts[i], "=", 2)
k := kv[0]
v := ""
if len(kv) == 2 {
v = kv[1]
}
switch k {
case "register":
if v != "false" {
tag.register = true
}
default:
klog.Fatalf("Unsupported %s param: %q", tagName, parts[i])
}
}
return tag
}
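// Illustrative examples (not part of the vendored source) of the comment tags
// that extractTag parses. A package-scoped tag in doc.go enables generation for
// every type in the package; the optional register parameter sets tag.register:
//
//	// +k8s:deepcopy-gen=package,register
//	package v1alpha1
//
// Individual types may opt out of a package-scoped tag, or opt in when no
// package-scoped tag is present:
//
//	// +k8s:deepcopy-gen=false
//	type NotCopied struct{}
//
//	// +k8s:deepcopy-gen=true
//	type Copied struct{}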
// TODO: This is created only to reduce the number of changes in a single PR.
// Remove it and use PublicNamer instead.
func deepCopyNamer() *namer.NameStrategy {
return &namer.NameStrategy{
Join: func(pre string, in []string, post string) string {
return strings.Join(in, "_")
},
PrependPackageNames: 1,
}
}
// NameSystems returns the name system used by the generators in this package.
func NameSystems() namer.NameSystems {
return namer.NameSystems{
"public": deepCopyNamer(),
"raw": namer.NewRawNamer("", nil),
}
}
// DefaultNameSystem returns the default name system for ordering the types to be
// processed by the generators in this package.
func DefaultNameSystem() string {
return "public"
}
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
boilerplate, err := arguments.LoadGoBoilerplate()
if err != nil {
klog.Fatalf("Failed loading boilerplate: %v", err)
}
inputs := sets.NewString(context.Inputs...)
packages := generator.Packages{}
header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...)
boundingDirs := []string{}
if customArgs, ok := arguments.CustomArgs.(*CustomArgs); ok {
if customArgs.BoundingDirs == nil {
customArgs.BoundingDirs = context.Inputs
}
for i := range customArgs.BoundingDirs {
// Strip any trailing slashes - they are not exactly "correct" but
// this is friendlier.
boundingDirs = append(boundingDirs, strings.TrimRight(customArgs.BoundingDirs[i], "/"))
}
}
for i := range inputs {
klog.V(5).Infof("Considering pkg %q", i)
pkg := context.Universe[i]
if pkg == nil {
// If the input had no Go files, for example.
continue
}
ptag := extractTag(pkg.Comments)
ptagValue := ""
ptagRegister := false
if ptag != nil {
ptagValue = ptag.value
if ptagValue != tagValuePackage {
klog.Fatalf("Package %v: unsupported %s value: %q", i, tagName, ptagValue)
}
ptagRegister = ptag.register
klog.V(5).Infof(" tag.value: %q, tag.register: %t", ptagValue, ptagRegister)
} else {
klog.V(5).Infof(" no tag")
}
// If the pkg-scoped tag says to generate, we can skip scanning types.
pkgNeedsGeneration := (ptagValue == tagValuePackage)
if !pkgNeedsGeneration {
// If the pkg-scoped tag did not exist, scan all types for one that
// explicitly wants generation.
for _, t := range pkg.Types {
klog.V(5).Infof(" considering type %q", t.Name.String())
ttag := extractTag(t.CommentLines)
if ttag != nil && ttag.value == "true" {
klog.V(5).Infof(" tag=true")
if !copyableType(t) {
klog.Fatalf("Type %v requests deepcopy generation but is not copyable", t)
}
pkgNeedsGeneration = true
break
}
}
}
if pkgNeedsGeneration {
klog.V(3).Infof("Package %q needs generation", i)
path := pkg.Path
// if the source path is within a /vendor/ directory (for example,
// k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1), allow
// generation to output to the proper relative path (under vendor).
// Otherwise, the generator will create the file in the wrong location
// in the output directory.
// TODO: build a more fundamental concept in gengo for dealing with modifications
// to vendored packages.
if strings.HasPrefix(pkg.SourcePath, arguments.OutputBase) {
expandedPath := strings.TrimPrefix(pkg.SourcePath, arguments.OutputBase)
if strings.Contains(expandedPath, "/vendor/") {
path = expandedPath
}
}
packages = append(packages,
&generator.DefaultPackage{
PackageName: strings.Split(filepath.Base(pkg.Path), ".")[0],
PackagePath: path,
HeaderText: header,
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
return []generator.Generator{
NewGenDeepCopy(arguments.OutputFileBaseName, pkg.Path, boundingDirs, (ptagValue == tagValuePackage), ptagRegister),
}
},
FilterFunc: func(c *generator.Context, t *types.Type) bool {
return t.Name.Package == pkg.Path
},
})
}
}
return packages
}
// genDeepCopy produces a file with autogenerated deep-copy functions.
type genDeepCopy struct {
generator.DefaultGen
targetPackage string
boundingDirs []string
allTypes bool
registerTypes bool
imports namer.ImportTracker
typesForInit []*types.Type
}
func NewGenDeepCopy(sanitizedName, targetPackage string, boundingDirs []string, allTypes, registerTypes bool) generator.Generator {
return &genDeepCopy{
DefaultGen: generator.DefaultGen{
OptionalName: sanitizedName,
},
targetPackage: targetPackage,
boundingDirs: boundingDirs,
allTypes: allTypes,
registerTypes: registerTypes,
imports: generator.NewImportTracker(),
typesForInit: make([]*types.Type, 0),
}
}
func (g *genDeepCopy) Namers(c *generator.Context) namer.NameSystems {
// Have the raw namer for this file track what it imports.
return namer.NameSystems{
"raw": namer.NewRawNamer(g.targetPackage, g.imports),
}
}
func (g *genDeepCopy) Filter(c *generator.Context, t *types.Type) bool {
// Filter out types not being processed or not copyable within the package.
enabled := g.allTypes
if !enabled {
ttag := extractTag(t.CommentLines)
if ttag != nil && ttag.value == "true" {
enabled = true
}
}
if !enabled {
return false
}
if !copyableType(t) {
klog.V(2).Infof("Type %v is not copyable", t)
return false
}
klog.V(4).Infof("Type %v is copyable", t)
g.typesForInit = append(g.typesForInit, t)
return true
}
func (g *genDeepCopy) copyableAndInBounds(t *types.Type) bool {
if !copyableType(t) {
return false
}
// Only packages within the restricted range can be processed.
if !isRootedUnder(t.Name.Package, g.boundingDirs) {
return false
}
return true
}
// deepCopyMethod returns the signature of a DeepCopy() method, nil or an error
// if the type does not match. This allows more efficient deep copy
// implementations to be defined by the type's author. The correct signature
// for a type T is:
// func (t T) DeepCopy() T
// or:
// func (t *T) DeepCopy() *T
func deepCopyMethod(t *types.Type) (*types.Signature, error) {
f, found := t.Methods["DeepCopy"]
if !found {
return nil, nil
}
if len(f.Signature.Parameters) != 0 {
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected no parameters", t)
}
if len(f.Signature.Results) != 1 {
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected exactly one result", t)
}
ptrResult := f.Signature.Results[0].Kind == types.Pointer && f.Signature.Results[0].Elem.Name == t.Name
nonPtrResult := f.Signature.Results[0].Name == t.Name
if !ptrResult && !nonPtrResult {
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected to return %s or *%s", t, t.Name.Name, t.Name.Name)
}
ptrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Kind == types.Pointer && f.Signature.Receiver.Elem.Name == t.Name
nonPtrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Name == t.Name
if ptrRcvr && !ptrResult {
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected a *%s result for a *%s receiver", t, t.Name.Name, t.Name.Name)
}
if nonPtrRcvr && !nonPtrResult {
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected a %s result for a %s receiver", t, t.Name.Name, t.Name.Name)
}
return f.Signature, nil
}
// deepCopyMethodOrDie returns the signature of a DeepCopy method, nil or calls klog.Fatalf
// if the type does not match.
func deepCopyMethodOrDie(t *types.Type) *types.Signature {
ret, err := deepCopyMethod(t)
if err != nil {
klog.Fatal(err)
}
return ret
}
// deepCopyIntoMethod returns the signature of a DeepCopyInto() method, nil or an error
// if the type is wrong. DeepCopyInto allows more efficient deep copy
// implementations to be defined by the type's author. The correct signature
// for a type T is:
// func (t T) DeepCopyInto(t *T)
// or:
// func (t *T) DeepCopyInto(t *T)
func deepCopyIntoMethod(t *types.Type) (*types.Signature, error) {
f, found := t.Methods["DeepCopyInto"]
if !found {
return nil, nil
}
if len(f.Signature.Parameters) != 1 {
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected exactly one parameter", t)
}
if len(f.Signature.Results) != 0 {
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected no result type", t)
}
ptrParam := f.Signature.Parameters[0].Kind == types.Pointer && f.Signature.Parameters[0].Elem.Name == t.Name
if !ptrParam {
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected parameter of type *%s", t, t.Name.Name)
}
ptrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Kind == types.Pointer && f.Signature.Receiver.Elem.Name == t.Name
nonPtrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Name == t.Name
if !ptrRcvr && !nonPtrRcvr {
// this should never happen
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected a receiver of type %s or *%s", t, t.Name.Name, t.Name.Name)
}
return f.Signature, nil
}
// deepCopyIntoMethodOrDie returns the signature of a DeepCopyInto() method, nil or calls klog.Fatalf
// if the type is wrong.
func deepCopyIntoMethodOrDie(t *types.Type) *types.Signature {
ret, err := deepCopyIntoMethod(t)
if err != nil {
klog.Fatal(err)
}
return ret
}
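// Illustrative example (not part of the vendored source) of hand-written
// methods whose signatures deepCopyMethod and deepCopyIntoMethod accept; when a
// type defines them, the generator calls these methods instead of emitting
// field-by-field copy code:
//
//	type Duration struct {
//		time.Duration
//	}
//
//	// Accepted by deepCopyIntoMethod: one *T parameter, no results.
//	func (d Duration) DeepCopyInto(out *Duration) { *out = d }
//
//	// Accepted by deepCopyMethod: no parameters, a single T (or *T) result
//	// matching the receiver.
//	func (d Duration) DeepCopy() Duration { return d }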
func isRootedUnder(pkg string, roots []string) bool {
// Add trailing / to avoid false matches, e.g. foo/bar vs foo/barn. This
// assumes that bounding dirs do not have trailing slashes.
pkg = pkg + "/"
for _, root := range roots {
if strings.HasPrefix(pkg, root+"/") {
return true
}
}
return false
}
func copyableType(t *types.Type) bool {
// If the type opts out of copy-generation, stop.
ttag := extractTag(t.CommentLines)
if ttag != nil && ttag.value == "false" {
return false
}
// Filter out private types.
if namer.IsPrivateGoName(t.Name.Name) {
return false
}
if t.Kind == types.Alias {
// if the underlying built-in is not deepcopy-able, deepcopy is opt-in through definition of custom methods.
// Note that aliases of builtins, maps, slices can have deepcopy methods.
if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil {
return true
} else {
return t.Underlying.Kind != types.Builtin || copyableType(t.Underlying)
}
}
if t.Kind != types.Struct {
return false
}
return true
}
func underlyingType(t *types.Type) *types.Type {
for t.Kind == types.Alias {
t = t.Underlying
}
return t
}
func (g *genDeepCopy) isOtherPackage(pkg string) bool {
if pkg == g.targetPackage {
return false
}
if strings.HasSuffix(pkg, "\""+g.targetPackage+"\"") {
return false
}
return true
}
func (g *genDeepCopy) Imports(c *generator.Context) (imports []string) {
importLines := []string{}
for _, singleImport := range g.imports.ImportLines() {
if g.isOtherPackage(singleImport) {
importLines = append(importLines, singleImport)
}
}
return importLines
}
func argsFromType(ts ...*types.Type) generator.Args {
a := generator.Args{
"type": ts[0],
}
for i, t := range ts {
a[fmt.Sprintf("type%d", i+1)] = t
}
return a
}
func (g *genDeepCopy) Init(c *generator.Context, w io.Writer) error {
return nil
}
func (g *genDeepCopy) needsGeneration(t *types.Type) bool {
tag := extractTag(t.CommentLines)
tv := ""
if tag != nil {
tv = tag.value
if tv != "true" && tv != "false" {
klog.Fatalf("Type %v: unsupported %s value: %q", t, tagName, tag.value)
}
}
if g.allTypes && tv == "false" {
// The whole package is being generated, but this type has opted out.
klog.V(5).Infof("Not generating for type %v because type opted out", t)
return false
}
if !g.allTypes && tv != "true" {
// The whole package is NOT being generated, and this type has NOT opted in.
klog.V(5).Infof("Not generating for type %v because type did not opt in", t)
return false
}
return true
}
func extractInterfacesTag(comments []string) []string {
var result []string
values := types.ExtractCommentTags("+", comments)[interfacesTagName]
for _, v := range values {
if len(v) == 0 {
continue
}
intfs := strings.Split(v, ",")
for _, intf := range intfs {
if intf == "" {
continue
}
result = append(result, intf)
}
}
return result
}
func extractNonPointerInterfaces(comments []string) (bool, error) {
values := types.ExtractCommentTags("+", comments)[interfacesNonPointerTagName]
if len(values) == 0 {
return false, nil
}
result := values[0] == "true"
for _, v := range values {
if v == "true" != result {
return false, fmt.Errorf("contradicting %v value %q found to previous value %v", interfacesNonPointerTagName, v, result)
}
}
return result, nil
}
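// Illustrative examples (not part of the vendored source) of the interface
// tags parsed above. The first asks the generator to also emit a
// DeepCopyObject method whose result type is runtime.Object; the second
// additionally switches those interface methods to value receivers:
//
//	// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
//	type Widget struct{}
//
//	// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
//	// +k8s:deepcopy-gen:nonpointer-interfaces=true
//	type WidgetList struct{}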
func (g *genDeepCopy) deepCopyableInterfacesInner(c *generator.Context, t *types.Type) ([]*types.Type, error) {
if t.Kind != types.Struct {
return nil, nil
}
intfs := extractInterfacesTag(append(t.SecondClosestCommentLines, t.CommentLines...))
var ts []*types.Type
for _, intf := range intfs {
t := types.ParseFullyQualifiedName(intf)
c.AddDir(t.Package)
intfT := c.Universe.Type(t)
if intfT == nil {
return nil, fmt.Errorf("unknown type %q in %s tag of type %s", intf, interfacesTagName, intfT)
}
if intfT.Kind != types.Interface {
return nil, fmt.Errorf("type %q in %s tag of type %s is not an interface, but: %q", intf, interfacesTagName, t, intfT.Kind)
}
g.imports.AddType(intfT)
ts = append(ts, intfT)
}
return ts, nil
}
// deepCopyableInterfaces returns the interface types to implement and whether they apply to a non-pointer receiver.
func (g *genDeepCopy) deepCopyableInterfaces(c *generator.Context, t *types.Type) ([]*types.Type, bool, error) {
ts, err := g.deepCopyableInterfacesInner(c, t)
if err != nil {
return nil, false, err
}
set := map[string]*types.Type{}
for _, t := range ts {
set[t.String()] = t
}
result := []*types.Type{}
for _, t := range set {
result = append(result, t)
}
TypeSlice(result).Sort() // we need a stable sorting because it determines the order in generation
nonPointerReceiver, err := extractNonPointerInterfaces(append(t.SecondClosestCommentLines, t.CommentLines...))
if err != nil {
return nil, false, err
}
return result, nonPointerReceiver, nil
}
type TypeSlice []*types.Type
func (s TypeSlice) Len() int { return len(s) }
func (s TypeSlice) Less(i, j int) bool { return s[i].String() < s[j].String() }
func (s TypeSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s TypeSlice) Sort() { sort.Sort(s) }
func (g *genDeepCopy) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
if !g.needsGeneration(t) {
return nil
}
klog.V(5).Infof("Generating deepcopy function for type %v", t)
sw := generator.NewSnippetWriter(w, c, "$", "$")
args := argsFromType(t)
if deepCopyIntoMethodOrDie(t) == nil {
sw.Do("// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.\n", args)
if isReference(t) {
sw.Do("func (in $.type|raw$) DeepCopyInto(out *$.type|raw$) {\n", args)
sw.Do("{in:=&in\n", nil)
} else {
sw.Do("func (in *$.type|raw$) DeepCopyInto(out *$.type|raw$) {\n", args)
}
if deepCopyMethodOrDie(t) != nil {
if t.Methods["DeepCopy"].Signature.Receiver.Kind == types.Pointer {
sw.Do("clone := in.DeepCopy()\n", nil)
sw.Do("*out = *clone\n", nil)
} else {
sw.Do("*out = in.DeepCopy()\n", nil)
}
sw.Do("return\n", nil)
} else {
g.generateFor(t, sw)
sw.Do("return\n", nil)
}
if isReference(t) {
sw.Do("}\n", nil)
}
sw.Do("}\n\n", nil)
}
if deepCopyMethodOrDie(t) == nil {
sw.Do("// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new $.type|raw$.\n", args)
if isReference(t) {
sw.Do("func (in $.type|raw$) DeepCopy() $.type|raw$ {\n", args)
} else {
sw.Do("func (in *$.type|raw$) DeepCopy() *$.type|raw$ {\n", args)
}
sw.Do("if in == nil { return nil }\n", nil)
sw.Do("out := new($.type|raw$)\n", args)
sw.Do("in.DeepCopyInto(out)\n", nil)
if isReference(t) {
sw.Do("return *out\n", nil)
} else {
sw.Do("return out\n", nil)
}
sw.Do("}\n\n", nil)
}
intfs, nonPointerReceiver, err := g.deepCopyableInterfaces(c, t)
if err != nil {
return err
}
for _, intf := range intfs {
sw.Do(fmt.Sprintf("// DeepCopy%s is an autogenerated deepcopy function, copying the receiver, creating a new $.type2|raw$.\n", intf.Name.Name), argsFromType(t, intf))
if nonPointerReceiver {
sw.Do(fmt.Sprintf("func (in $.type|raw$) DeepCopy%s() $.type2|raw$ {\n", intf.Name.Name), argsFromType(t, intf))
sw.Do("return *in.DeepCopy()", nil)
sw.Do("}\n\n", nil)
} else {
sw.Do(fmt.Sprintf("func (in *$.type|raw$) DeepCopy%s() $.type2|raw$ {\n", intf.Name.Name), argsFromType(t, intf))
sw.Do("if c := in.DeepCopy(); c != nil {\n", nil)
sw.Do("return c\n", nil)
sw.Do("}\n", nil)
sw.Do("return nil\n", nil)
sw.Do("}\n\n", nil)
}
}
return sw.Error()
}
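// Illustrative sample (not part of the vendored source) of the output
// GenerateType produces for a hypothetical struct with a single map field,
// shown after gofmt:
//
//	// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
//	func (in *Widget) DeepCopyInto(out *Widget) {
//		*out = *in
//		if in.Labels != nil {
//			in, out := &in.Labels, &out.Labels
//			*out = make(map[string]string, len(*in))
//			for key, val := range *in {
//				(*out)[key] = val
//			}
//		}
//		return
//	}
//
//	// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Widget.
//	func (in *Widget) DeepCopy() *Widget {
//		if in == nil {
//			return nil
//		}
//		out := new(Widget)
//		in.DeepCopyInto(out)
//		return out
//	}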
// isReference returns true for pointers, maps, slices, and aliases of those.
func isReference(t *types.Type) bool {
if t.Kind == types.Pointer || t.Kind == types.Map || t.Kind == types.Slice {
return true
}
return t.Kind == types.Alias && isReference(underlyingType(t))
}
// we use the system of shadowing 'in' and 'out' so that the same code is valid
// at any nesting level. This makes the autogenerator easy to understand, and
// the compiler shouldn't care.
func (g *genDeepCopy) generateFor(t *types.Type, sw *generator.SnippetWriter) {
// derive inner types if t is an alias. We call the do* methods below with the alias type.
// basic rule: generate according to inner type, but construct objects with the alias type.
ut := underlyingType(t)
var f func(*types.Type, *generator.SnippetWriter)
switch ut.Kind {
case types.Builtin:
f = g.doBuiltin
case types.Map:
f = g.doMap
case types.Slice:
f = g.doSlice
case types.Struct:
f = g.doStruct
case types.Pointer:
f = g.doPointer
case types.Interface:
// interfaces are handled in-line in the other cases
klog.Fatalf("Hit an interface type %v. This should never happen.", t)
case types.Alias:
// can never happen because we branch on the underlying type which is never an alias
klog.Fatalf("Hit an alias type %v. This should never happen.", t)
default:
klog.Fatalf("Hit an unsupported type %v.", t)
}
f(t, sw)
}
// doBuiltin generates code for a builtin or an alias to a builtin. The generated code
// is the same for both cases, i.e. it's the code for the underlying type.
func (g *genDeepCopy) doBuiltin(t *types.Type, sw *generator.SnippetWriter) {
if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil {
sw.Do("*out = in.DeepCopy()\n", nil)
return
}
sw.Do("*out = *in\n", nil)
}
// doMap generates code for a map or an alias to a map. The generated code
// is the same for both cases, i.e. it's the code for the underlying type.
func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) {
ut := underlyingType(t)
uet := underlyingType(ut.Elem)
if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil {
sw.Do("*out = in.DeepCopy()\n", nil)
return
}
if !ut.Key.IsAssignable() {
klog.Fatalf("Hit an unsupported type %v.", uet)
}
sw.Do("*out = make($.|raw$, len(*in))\n", t)
sw.Do("for key, val := range *in {\n", nil)
dc, dci := deepCopyMethodOrDie(ut.Elem), deepCopyIntoMethodOrDie(ut.Elem)
switch {
case dc != nil || dci != nil:
// Note: a DeepCopy exists because it is added if DeepCopyInto is manually defined
leftPointer := ut.Elem.Kind == types.Pointer
rightPointer := !isReference(ut.Elem)
if dc != nil {
rightPointer = dc.Results[0].Kind == types.Pointer
}
if leftPointer == rightPointer {
sw.Do("(*out)[key] = val.DeepCopy()\n", nil)
} else if leftPointer {
sw.Do("x := val.DeepCopy()\n", nil)
sw.Do("(*out)[key] = &x\n", nil)
} else {
sw.Do("(*out)[key] = *val.DeepCopy()\n", nil)
}
case ut.Elem.IsAnonymousStruct(): // not uet here because it needs type cast
sw.Do("(*out)[key] = val\n", nil)
case uet.IsAssignable():
sw.Do("(*out)[key] = val\n", nil)
case uet.Kind == types.Interface:
sw.Do("if val == nil {(*out)[key]=nil} else {\n", nil)
// Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it
// as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang
// parser does not give us the underlying interface name. So we cannot do any better.
sw.Do(fmt.Sprintf("(*out)[key] = val.DeepCopy%s()\n", uet.Name.Name), nil)
sw.Do("}\n", nil)
case uet.Kind == types.Slice || uet.Kind == types.Map || uet.Kind == types.Pointer:
sw.Do("var outVal $.|raw$\n", uet)
sw.Do("if val == nil { (*out)[key] = nil } else {\n", nil)
sw.Do("in, out := &val, &outVal\n", uet)
g.generateFor(ut.Elem, sw)
sw.Do("}\n", nil)
sw.Do("(*out)[key] = outVal\n", nil)
case uet.Kind == types.Struct:
sw.Do("(*out)[key] = *val.DeepCopy()\n", uet)
default:
klog.Fatalf("Hit an unsupported type %v.", uet)
}
sw.Do("}\n", nil)
}
// doSlice generates code for a slice or an alias to a slice. The generated code
// is the same for both cases, i.e. it's the code for the underlying type.
func (g *genDeepCopy) doSlice(t *types.Type, sw *generator.SnippetWriter) {
ut := underlyingType(t)
uet := underlyingType(ut.Elem)
if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil {
sw.Do("*out = in.DeepCopy()\n", nil)
return
}
sw.Do("*out = make($.|raw$, len(*in))\n", t)
if deepCopyMethodOrDie(ut.Elem) != nil || deepCopyIntoMethodOrDie(ut.Elem) != nil {
sw.Do("for i := range *in {\n", nil)
// Note: a DeepCopyInto exists because it is added if DeepCopy is manually defined
sw.Do("(*in)[i].DeepCopyInto(&(*out)[i])\n", nil)
sw.Do("}\n", nil)
} else if uet.Kind == types.Builtin || uet.IsAssignable() {
sw.Do("copy(*out, *in)\n", nil)
} else {
sw.Do("for i := range *in {\n", nil)
if uet.Kind == types.Slice || uet.Kind == types.Map || uet.Kind == types.Pointer || deepCopyMethodOrDie(ut.Elem) != nil || deepCopyIntoMethodOrDie(ut.Elem) != nil {
sw.Do("if (*in)[i] != nil {\n", nil)
sw.Do("in, out := &(*in)[i], &(*out)[i]\n", nil)
g.generateFor(ut.Elem, sw)
sw.Do("}\n", nil)
} else if uet.Kind == types.Interface {
sw.Do("if (*in)[i] != nil {\n", nil)
// Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it
// as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang
// parser does not give us the underlying interface name. So we cannot do any better.
sw.Do(fmt.Sprintf("(*out)[i] = (*in)[i].DeepCopy%s()\n", uet.Name.Name), nil)
sw.Do("}\n", nil)
} else if uet.Kind == types.Struct {
sw.Do("(*in)[i].DeepCopyInto(&(*out)[i])\n", nil)
} else {
klog.Fatalf("Hit an unsupported type %v.", uet)
}
sw.Do("}\n", nil)
}
}
// doStruct generates code for a struct or an alias to a struct. The generated code
// is the same for both cases, i.e. it's the code for the underlying type.
func (g *genDeepCopy) doStruct(t *types.Type, sw *generator.SnippetWriter) {
ut := underlyingType(t)
if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil {
sw.Do("*out = in.DeepCopy()\n", nil)
return
}
// Simple copy covers a lot of cases.
sw.Do("*out = *in\n", nil)
// Now fix-up fields as needed.
for _, m := range ut.Members {
ft := m.Type
uft := underlyingType(ft)
args := generator.Args{
"type": ft,
"kind": ft.Kind,
"name": m.Name,
}
dc, dci := deepCopyMethodOrDie(ft), deepCopyIntoMethodOrDie(ft)
switch {
case dc != nil || dci != nil:
// Note: a DeepCopyInto exists because it is added if DeepCopy is manually defined
leftPointer := ft.Kind == types.Pointer
rightPointer := !isReference(ft)
if dc != nil {
rightPointer = dc.Results[0].Kind == types.Pointer
}
if leftPointer == rightPointer {
sw.Do("out.$.name$ = in.$.name$.DeepCopy()\n", args)
} else if leftPointer {
sw.Do("x := in.$.name$.DeepCopy()\n", args)
sw.Do("out.$.name$ = = &x\n", args)
} else {
sw.Do("in.$.name$.DeepCopyInto(&out.$.name$)\n", args)
}
case uft.Kind == types.Builtin:
// the initial *out = *in was enough
case uft.Kind == types.Map, uft.Kind == types.Slice, uft.Kind == types.Pointer:
// Fixup non-nil reference-semantic types.
sw.Do("if in.$.name$ != nil {\n", args)
sw.Do("in, out := &in.$.name$, &out.$.name$\n", args)
g.generateFor(ft, sw)
sw.Do("}\n", nil)
case uft.Kind == types.Struct:
if ft.IsAssignable() {
sw.Do("out.$.name$ = in.$.name$\n", args)
} else {
sw.Do("in.$.name$.DeepCopyInto(&out.$.name$)\n", args)
}
case uft.Kind == types.Interface:
sw.Do("if in.$.name$ != nil {\n", args)
// Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it
// as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang
// parser does not give us the underlying interface name. So we cannot do any better.
sw.Do(fmt.Sprintf("out.$.name$ = in.$.name$.DeepCopy%s()\n", uft.Name.Name), args)
sw.Do("}\n", nil)
default:
klog.Fatalf("Hit an unsupported type %v.", uft)
}
}
}
// doPointer generates code for a pointer or an alias to a pointer. The generated code
// is the same for both cases, i.e. it's the code for the underlying type.
func (g *genDeepCopy) doPointer(t *types.Type, sw *generator.SnippetWriter) {
ut := underlyingType(t)
uet := underlyingType(ut.Elem)
dc, dci := deepCopyMethodOrDie(ut.Elem), deepCopyIntoMethodOrDie(ut.Elem)
switch {
case dc != nil || dci != nil:
rightPointer := !isReference(ut.Elem)
if dc != nil {
rightPointer = dc.Results[0].Kind == types.Pointer
}
if rightPointer {
sw.Do("*out = (*in).DeepCopy()\n", nil)
} else {
sw.Do("x := (*in).DeepCopy()\n", nil)
sw.Do("*out = &x\n", nil)
}
case uet.IsAssignable():
sw.Do("*out = new($.Elem|raw$)\n", ut)
sw.Do("**out = **in", nil)
case uet.Kind == types.Map, uet.Kind == types.Slice, uet.Kind == types.Pointer:
sw.Do("*out = new($.Elem|raw$)\n", ut)
sw.Do("if **in != nil {\n", nil)
sw.Do("in, out := *in, *out\n", nil)
g.generateFor(uet, sw)
sw.Do("}\n", nil)
case uet.Kind == types.Struct:
sw.Do("*out = new($.Elem|raw$)\n", ut)
sw.Do("(*in).DeepCopyInto(*out)\n", nil)
default:
klog.Fatalf("Hit an unsupported type %v.", uet)
}
}

Some files were not shown because too many files have changed in this diff.