staticcheck (#313)

* CI: use staticcheck for linting

This commit switches the linter for Go code from golint to staticcheck.
Golint has been deprecated since last year and staticcheck is a
recommended replacement.

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>

* revendor

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>

* cmd,pkg: fix lint warnings

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>
This commit is contained in:
Lucas Servén Marín
2022-05-19 19:45:43 +02:00
committed by GitHub
parent 93f46e03ea
commit 50fbc2eec2
227 changed files with 55458 additions and 2689 deletions

20
vendor/honnef.co/go/tools/LICENSE vendored Normal file
View File

@@ -0,0 +1,20 @@
Copyright (c) 2016 Dominik Honnef
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -0,0 +1,121 @@
Staticcheck and its related tools make use of third party projects,
either by reusing their code, or by statically linking them into
resulting binaries. These projects are:
* The Go Programming Language - https://golang.org/
golang.org/x/mod - https://github.com/golang/mod
golang.org/x/tools - https://github.com/golang/tools
golang.org/x/sys - https://github.com/golang/sys
golang.org/x/xerrors - https://github.com/golang/xerrors
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* github.com/BurntSushi/toml - https://github.com/BurntSushi/toml
The MIT License (MIT)
Copyright (c) 2013 TOML authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
* gogrep - https://github.com/mvdan/gogrep
Copyright (c) 2017, Daniel Martí. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* gosmith - https://github.com/dvyukov/gosmith
Copyright (c) 2014 Dmitry Vyukov. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* The name of Dmitry Vyukov may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,342 @@
// Package code answers structural and type questions about Go code.
package code
import (
"flag"
"fmt"
"go/ast"
"go/constant"
"go/token"
"go/types"
"strings"
"honnef.co/go/tools/analysis/facts"
"honnef.co/go/tools/go/ast/astutil"
"honnef.co/go/tools/go/types/typeutil"
"honnef.co/go/tools/pattern"
"golang.org/x/exp/typeparams"
"golang.org/x/tools/go/analysis"
)
// Positioner describes values that can report a position in a
// token.FileSet, such as ast.Node.
type Positioner interface {
	Pos() token.Pos
}

// IsOfType reports whether expr's type is the type with the given
// fully qualified name.
func IsOfType(pass *analysis.Pass, expr ast.Expr, name string) bool {
	return typeutil.IsType(pass.TypesInfo.TypeOf(expr), name)
}

// IsInTest reports whether node is located in a _test.go file.
func IsInTest(pass *analysis.Pass, node Positioner) bool {
	// FIXME(dh): this doesn't work for global variables with
	// initializers
	f := pass.Fset.File(node.Pos())
	return f != nil && strings.HasSuffix(f.Name(), "_test.go")
}
// IsMain reports whether the package being processed is a package
// main.
func IsMain(pass *analysis.Pass) bool {
	return pass.Pkg.Name() == "main"
}

// IsMainLike reports whether the package being processed is a
// main-like package. A main-like package is a package that is
// package main, or that is intended to be used by a tool framework
// such as cobra to implement a command.
//
// Note that this function errs on the side of false positives; it may
// return true for packages that aren't main-like. IsMainLike is
// intended for analyses that wish to suppress diagnostics for
// main-like packages to avoid false positives.
func IsMainLike(pass *analysis.Pass) bool {
	if pass.Pkg.Name() == "main" {
		return true
	}
	// Importing cobra is treated as strong evidence that the package
	// implements a CLI command.
	for _, imp := range pass.Pkg.Imports() {
		if imp.Path() == "github.com/spf13/cobra" {
			return true
		}
	}
	return false
}
// SelectorName returns the name of the selector expression expr as it
// would be qualified in diagnostics: package-level identifiers are
// prefixed with their package path, fields and methods with their
// receiver type in parentheses.
func SelectorName(pass *analysis.Pass, expr *ast.SelectorExpr) string {
	info := pass.TypesInfo
	sel := info.Selections[expr]
	if sel == nil {
		// Not a field/method selection; this is a qualified
		// identifier such as pkg.Name.
		if x, ok := expr.X.(*ast.Ident); ok {
			pkg, ok := info.ObjectOf(x).(*types.PkgName)
			if !ok {
				// This shouldn't happen
				return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name)
			}
			return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name)
		}
		panic(fmt.Sprintf("unsupported selector: %v", expr))
	}
	// Fields are reported relative to the fully dereferenced receiver
	// type; methods relative to the receiver as selected.
	if v, ok := sel.Obj().(*types.Var); ok && v.IsField() {
		return fmt.Sprintf("(%s).%s", typeutil.DereferenceR(sel.Recv()), sel.Obj().Name())
	}
	return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name())
}
// IsNil reports whether expr is the predeclared nil value.
func IsNil(pass *analysis.Pass, expr ast.Expr) bool {
	return pass.TypesInfo.Types[expr].IsNil()
}

// BoolConst returns the value of the boolean constant that expr
// denotes. It panics (via failed type assertions) if expr is not an
// identifier for a boolean constant; use IsBoolConst to check first.
func BoolConst(pass *analysis.Pass, expr ast.Expr) bool {
	val := pass.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val()
	return constant.BoolVal(val)
}

// IsBoolConst reports whether expr is an identifier denoting a
// constant of type bool or untyped bool.
func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool {
	// We explicitly don't support typed bools because more often than
	// not, custom bool types are used as binary enums and the
	// explicit comparison is desired.
	ident, ok := expr.(*ast.Ident)
	if !ok {
		return false
	}
	obj := pass.TypesInfo.ObjectOf(ident)
	c, ok := obj.(*types.Const)
	if !ok {
		return false
	}
	basic, ok := c.Type().(*types.Basic)
	if !ok {
		return false
	}
	if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool {
		return false
	}
	return true
}

// ExprToInt returns the value of expr as an int64. The second return
// value reports whether expr is an integer constant expression.
func ExprToInt(pass *analysis.Pass, expr ast.Expr) (int64, bool) {
	tv := pass.TypesInfo.Types[expr]
	if tv.Value == nil {
		return 0, false
	}
	if tv.Value.Kind() != constant.Int {
		return 0, false
	}
	return constant.Int64Val(tv.Value)
}

// ExprToString returns the value of expr as a string. The second
// return value reports whether expr is a string constant expression.
func ExprToString(pass *analysis.Pass, expr ast.Expr) (string, bool) {
	val := pass.TypesInfo.Types[expr].Value
	if val == nil {
		return "", false
	}
	if val.Kind() != constant.String {
		return "", false
	}
	return constant.StringVal(val), true
}
// CallName returns the fully qualified name of the function or
// builtin that call invokes, or the empty string if the callee cannot
// be determined statically.
func CallName(pass *analysis.Pass, call *ast.CallExpr) string {
	fun := astutil.Unparen(call.Fun)

	// Instantiating a function cannot return another generic function, so doing this once is enough
	switch idx := fun.(type) {
	case *ast.IndexExpr:
		fun = idx.X
	case *typeparams.IndexListExpr:
		fun = idx.X
	}

	// (foo)[T] is not a valid instantiation, so no need to unparen again.

	switch fun := fun.(type) {
	case *ast.SelectorExpr:
		fn, ok := pass.TypesInfo.ObjectOf(fun.Sel).(*types.Func)
		if !ok {
			return ""
		}
		return typeutil.FuncName(fn)
	case *ast.Ident:
		obj := pass.TypesInfo.ObjectOf(fun)
		switch obj := obj.(type) {
		case *types.Func:
			return typeutil.FuncName(obj)
		case *types.Builtin:
			return obj.Name()
		default:
			return ""
		}
	default:
		return ""
	}
}
// IsCallTo reports whether node is a call to the function with the
// given fully qualified name.
func IsCallTo(pass *analysis.Pass, node ast.Node, name string) bool {
	call, ok := node.(*ast.CallExpr)
	if !ok {
		return false
	}
	return CallName(pass, call) == name
}

// IsCallToAny reports whether node is a call to any of the functions
// named by names.
func IsCallToAny(pass *analysis.Pass, node ast.Node, names ...string) bool {
	call, ok := node.(*ast.CallExpr)
	if !ok {
		return false
	}
	q := CallName(pass, call)
	for _, name := range names {
		if q == name {
			return true
		}
	}
	return false
}

// File returns the ast.File that contains node.
func File(pass *analysis.Pass, node Positioner) *ast.File {
	m := pass.ResultOf[facts.TokenFile].(map[*token.File]*ast.File)
	return m[pass.Fset.File(node.Pos())]
}
// IsGenerated reports whether pos is in a generated file. It ignores
// //line directives.
func IsGenerated(pass *analysis.Pass, pos token.Pos) bool {
	_, ok := Generator(pass, pos)
	return ok
}

// Generator returns the generator that generated the file containing
// pos. It ignores //line directives.
func Generator(pass *analysis.Pass, pos token.Pos) (facts.Generator, bool) {
	// PositionFor with adjusted=false yields the on-disk file name,
	// not one remapped by //line directives.
	file := pass.Fset.PositionFor(pos, false).Filename
	m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
	g, ok := m[file]
	return g, ok
}
// MayHaveSideEffects reports whether expr may have side effects. If
// the purity argument is nil, this function implements a purely
// syntactic check, meaning that any function call may have side
// effects, regardless of the called function's body. Otherwise,
// purity will be consulted to determine the purity of function calls.
func MayHaveSideEffects(pass *analysis.Pass, expr ast.Expr, purity facts.PurityResult) bool {
	switch expr := expr.(type) {
	case *ast.BadExpr:
		// Conservatively assume malformed expressions are impure.
		return true
	case *ast.Ellipsis:
		return MayHaveSideEffects(pass, expr.Elt, purity)
	case *ast.FuncLit:
		// the literal itself cannot have side effects, only calling it
		// might, which is handled by CallExpr.
		return false
	case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
		// types cannot have side effects
		return false
	case *ast.BasicLit:
		return false
	case *ast.BinaryExpr:
		return MayHaveSideEffects(pass, expr.X, purity) || MayHaveSideEffects(pass, expr.Y, purity)
	case *ast.CallExpr:
		// Without purity facts, any call may have side effects.
		if purity == nil {
			return true
		}
		switch obj := typeutil.Callee(pass.TypesInfo, expr).(type) {
		case *types.Func:
			if _, ok := purity[obj]; !ok {
				return true
			}
		case *types.Builtin:
			// Of the builtins, only len and cap are side-effect free.
			switch obj.Name() {
			case "len", "cap":
			default:
				return true
			}
		default:
			return true
		}
		// A pure callee can still receive impure arguments.
		for _, arg := range expr.Args {
			if MayHaveSideEffects(pass, arg, purity) {
				return true
			}
		}
		return false
	case *ast.CompositeLit:
		if MayHaveSideEffects(pass, expr.Type, purity) {
			return true
		}
		for _, elt := range expr.Elts {
			if MayHaveSideEffects(pass, elt, purity) {
				return true
			}
		}
		return false
	case *ast.Ident:
		return false
	case *ast.IndexExpr:
		return MayHaveSideEffects(pass, expr.X, purity) || MayHaveSideEffects(pass, expr.Index, purity)
	case *typeparams.IndexListExpr:
		// In theory, none of the checks are necessary, as IndexListExpr only involves types. But there is no harm in
		// being safe.
		if MayHaveSideEffects(pass, expr.X, purity) {
			return true
		}
		for _, idx := range expr.Indices {
			if MayHaveSideEffects(pass, idx, purity) {
				return true
			}
		}
		return false
	case *ast.KeyValueExpr:
		return MayHaveSideEffects(pass, expr.Key, purity) || MayHaveSideEffects(pass, expr.Value, purity)
	case *ast.SelectorExpr:
		return MayHaveSideEffects(pass, expr.X, purity)
	case *ast.SliceExpr:
		return MayHaveSideEffects(pass, expr.X, purity) ||
			MayHaveSideEffects(pass, expr.Low, purity) ||
			MayHaveSideEffects(pass, expr.High, purity) ||
			MayHaveSideEffects(pass, expr.Max, purity)
	case *ast.StarExpr:
		return MayHaveSideEffects(pass, expr.X, purity)
	case *ast.TypeAssertExpr:
		return MayHaveSideEffects(pass, expr.X, purity)
	case *ast.UnaryExpr:
		if MayHaveSideEffects(pass, expr.X, purity) {
			return true
		}
		// Channel receives (<-) consume a value; taking an address
		// (&) is treated as an effect as well.
		return expr.Op == token.ARROW || expr.Op == token.AND
	case *ast.ParenExpr:
		return MayHaveSideEffects(pass, expr.X, purity)
	case nil:
		return false
	default:
		panic(fmt.Sprintf("internal error: unhandled type %T", expr))
	}
}
// IsGoVersion reports whether the targeted Go version is at least
// 1.minor, as configured via the analyzer's "go" flag. It panics if
// the analyzer doesn't provide that flag.
func IsGoVersion(pass *analysis.Pass, minor int) bool {
	f, ok := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter)
	if !ok {
		panic("requested Go version, but analyzer has no version flag")
	}
	version := f.Get().(int)
	return version >= minor
}

// integerLiteralQ matches integer literal expressions and binds their
// type-and-value to "tv".
var integerLiteralQ = pattern.MustParse(`(IntegerLiteral tv)`)

// IntegerLiteral returns the type and value of node if it is an
// integer literal.
func IntegerLiteral(pass *analysis.Pass, node ast.Node) (types.TypeAndValue, bool) {
	m, ok := Match(pass, integerLiteralQ, node)
	if !ok {
		return types.TypeAndValue{}, false
	}
	return m.State["tv"].(types.TypeAndValue), true
}

// IsIntegerLiteral reports whether node is an integer literal with
// the given constant value.
func IsIntegerLiteral(pass *analysis.Pass, node ast.Node, value constant.Value) bool {
	tv, ok := IntegerLiteral(pass, node)
	if !ok {
		return false
	}
	return constant.Compare(tv.Value, token.EQL, value)
}

View File

@@ -0,0 +1,51 @@
package code
import (
"bytes"
"go/ast"
"go/format"
"honnef.co/go/tools/pattern"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
)
// Preorder calls fn for every node of the given types, in depth-first
// preorder, using the shared AST inspector.
func Preorder(pass *analysis.Pass, fn func(ast.Node), types ...ast.Node) {
	pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder(types, fn)
}

// PreorderStack is like Preorder, but additionally passes the stack
// of ancestor nodes to fn.
func PreorderStack(pass *analysis.Pass, fn func(ast.Node, []ast.Node), types ...ast.Node) {
	pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).WithStack(types, func(n ast.Node, push bool, stack []ast.Node) (proceed bool) {
		if push {
			fn(n, stack)
		}
		return true
	})
}

// Match matches the pattern q against node, returning the matcher
// (with bound state) and whether the match succeeded.
func Match(pass *analysis.Pass, q pattern.Pattern, node ast.Node) (*pattern.Matcher, bool) {
	// Note that we ignore q.Relevant; callers of Match usually use
	// AST inspectors that already filter on nodes we're interested
	// in.
	m := &pattern.Matcher{TypesInfo: pass.TypesInfo}
	ok := m.Match(q.Root, node)
	return m, ok
}
// MatchAndEdit matches node against before. On success it also
// returns a text edit that replaces node with the instantiation of
// the after pattern using the matched state.
func MatchAndEdit(pass *analysis.Pass, before, after pattern.Pattern, node ast.Node) (*pattern.Matcher, []analysis.TextEdit, bool) {
	m, ok := Match(pass, before, node)
	if !ok {
		return m, nil, false
	}
	r := pattern.NodeToAST(after.Root, m.State)
	buf := &bytes.Buffer{}
	// NOTE(review): the error from format.Node is ignored; on a
	// formatting failure the edit's NewText would be empty — confirm
	// this is intended.
	format.Node(buf, pass.Fset, r)
	edit := []analysis.TextEdit{{
		Pos:     node.Pos(),
		End:     node.End(),
		NewText: buf.Bytes(),
	}}
	return m, edit, true
}

View File

@@ -0,0 +1,83 @@
// Package edit contains helpers for creating suggested fixes.
package edit
import (
"bytes"
"go/ast"
"go/format"
"go/token"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/pattern"
)
// Ranger describes values that have a start and end position.
// In most cases these are either ast.Node or manually constructed ranges.
type Ranger interface {
	// Pos returns the start of the range.
	Pos() token.Pos
	// End returns the end of the range.
	End() token.Pos
}
// Range implements the Ranger interface.
type Range [2]token.Pos
func (r Range) Pos() token.Pos { return r[0] }
func (r Range) End() token.Pos { return r[1] }
// ReplaceWithString replaces a range with a string.
func ReplaceWithString(old Ranger, new string) analysis.TextEdit {
	edit := analysis.TextEdit{
		Pos: old.Pos(),
		End: old.End(),
	}
	edit.NewText = []byte(new)
	return edit
}

// ReplaceWithNode replaces a range with an AST node.
func ReplaceWithNode(fset *token.FileSet, old Ranger, new ast.Node) analysis.TextEdit {
	var buf bytes.Buffer
	if err := format.Node(&buf, fset, new); err != nil {
		panic("internal error: " + err.Error())
	}
	return analysis.TextEdit{
		Pos:     old.Pos(),
		End:     old.End(),
		NewText: buf.Bytes(),
	}
}
// ReplaceWithPattern replaces a range with the result of executing a
// pattern. It panics if the instantiated pattern cannot be formatted,
// consistent with ReplaceWithNode.
func ReplaceWithPattern(fset *token.FileSet, old Ranger, new pattern.Pattern, state pattern.State) analysis.TextEdit {
	r := pattern.NodeToAST(new.Root, state)
	buf := &bytes.Buffer{}
	// Previously the error was silently discarded, which would have
	// produced an empty replacement text; fail loudly instead, the
	// same way ReplaceWithNode does.
	if err := format.Node(buf, fset, r); err != nil {
		panic("internal error: " + err.Error())
	}
	return analysis.TextEdit{
		Pos:     old.Pos(),
		End:     old.End(),
		NewText: buf.Bytes(),
	}
}
// Delete deletes a range of code.
func Delete(old Ranger) analysis.TextEdit {
	return analysis.TextEdit{Pos: old.Pos(), End: old.End(), NewText: nil}
}

// Fix creates a suggested fix carrying the given message and edits.
func Fix(msg string, edits ...analysis.TextEdit) analysis.SuggestedFix {
	fix := analysis.SuggestedFix{Message: msg}
	fix.TextEdits = edits
	return fix
}
// Selector creates a new selector expression.
func Selector(x, sel string) *ast.SelectorExpr {
return &ast.SelectorExpr{
X: &ast.Ident{Name: x},
Sel: &ast.Ident{Name: sel},
}
}

View File

@@ -0,0 +1,145 @@
package facts
import (
"go/ast"
"go/token"
"go/types"
"reflect"
"strings"
"golang.org/x/tools/go/analysis"
)
// IsDeprecated is a fact recorded for deprecated objects and
// packages. Msg carries the text following the "Deprecated: " marker.
type IsDeprecated struct{ Msg string }

func (*IsDeprecated) AFact()           {}
func (d *IsDeprecated) String() string { return "Deprecated: " + d.Msg }

// DeprecatedResult is the result type of the Deprecated analyzer.
type DeprecatedResult struct {
	Objects  map[types.Object]*IsDeprecated
	Packages map[*types.Package]*IsDeprecated
}

// Deprecated marks objects and packages whose doc comments contain a
// "Deprecated: " paragraph.
var Deprecated = &analysis.Analyzer{
	Name:       "fact_deprecated",
	Doc:        "Mark deprecated objects",
	Run:        deprecated,
	FactTypes:  []analysis.Fact{(*IsDeprecated)(nil)},
	ResultType: reflect.TypeOf(DeprecatedResult{}),
}
// deprecated implements the Deprecated analyzer. It scans doc
// comments for paragraphs beginning with "Deprecated: " and exports
// IsDeprecated facts for the affected objects and packages.
func deprecated(pass *analysis.Pass) (interface{}, error) {
	var names []*ast.Ident

	// extractDeprecatedMessage returns the text following the first
	// "Deprecated: " paragraph marker in docs, with newlines
	// flattened to spaces, or "" if there is none.
	extractDeprecatedMessage := func(docs []*ast.CommentGroup) string {
		for _, doc := range docs {
			if doc == nil {
				continue
			}
			parts := strings.Split(doc.Text(), "\n\n")
			for _, part := range parts {
				if !strings.HasPrefix(part, "Deprecated: ") {
					continue
				}
				alt := part[len("Deprecated: "):]
				// staticcheck S1017-adjacent cleanup: ReplaceAll
				// instead of Replace with n == -1.
				alt = strings.ReplaceAll(alt, "\n", " ")
				return alt
			}
		}
		return ""
	}
	// doDocs exports an IsDeprecated fact for each name when docs
	// contain a deprecation message.
	doDocs := func(names []*ast.Ident, docs []*ast.CommentGroup) {
		alt := extractDeprecatedMessage(docs)
		if alt == "" {
			return
		}
		for _, name := range names {
			obj := pass.TypesInfo.ObjectOf(name)
			pass.ExportObjectFact(obj, &IsDeprecated{alt})
		}
	}

	// Package-level deprecation: look at the doc comments of all
	// files in the package.
	var docs []*ast.CommentGroup
	for _, f := range pass.Files {
		docs = append(docs, f.Doc)
	}
	if alt := extractDeprecatedMessage(docs); alt != "" {
		// Don't mark package syscall as deprecated, even though
		// it is. A lot of people still use it for simple
		// constants like SIGKILL, and I am not comfortable
		// telling them to use x/sys for that.
		if pass.Pkg.Path() != "syscall" {
			pass.ExportPackageFact(&IsDeprecated{alt})
		}
	}
	docs = docs[:0]
	for _, f := range pass.Files {
		fn := func(node ast.Node) bool {
			if node == nil {
				return true
			}
			var ret bool
			switch node := node.(type) {
			case *ast.GenDecl:
				switch node.Tok {
				case token.TYPE, token.CONST, token.VAR:
					// Collect the decl's doc; the contained specs
					// are visited next and inherit it.
					docs = append(docs, node.Doc)
					return true
				default:
					return false
				}
			case *ast.FuncDecl:
				docs = append(docs, node.Doc)
				names = []*ast.Ident{node.Name}
				ret = false
			case *ast.TypeSpec:
				docs = append(docs, node.Doc)
				names = []*ast.Ident{node.Name}
				// Recurse so struct fields and interface methods
				// are handled below.
				ret = true
			case *ast.ValueSpec:
				docs = append(docs, node.Doc)
				names = node.Names
				ret = false
			case *ast.File:
				return true
			case *ast.StructType:
				for _, field := range node.Fields.List {
					doDocs(field.Names, []*ast.CommentGroup{field.Doc})
				}
				return false
			case *ast.InterfaceType:
				for _, field := range node.Methods.List {
					doDocs(field.Names, []*ast.CommentGroup{field.Doc})
				}
				return false
			default:
				return false
			}
			if len(names) == 0 || len(docs) == 0 {
				return ret
			}
			doDocs(names, docs)
			docs = docs[:0]
			names = nil
			return ret
		}
		ast.Inspect(f, fn)
	}

	// Collect all facts (including imported ones) into the result.
	out := DeprecatedResult{
		Objects:  map[types.Object]*IsDeprecated{},
		Packages: map[*types.Package]*IsDeprecated{},
	}
	for _, fact := range pass.AllObjectFacts() {
		out.Objects[fact.Object] = fact.Fact.(*IsDeprecated)
	}
	for _, fact := range pass.AllPackageFacts() {
		out.Packages[fact.Package] = fact.Fact.(*IsDeprecated)
	}
	return out, nil
}

View File

@@ -0,0 +1,20 @@
package facts
import (
"reflect"
"golang.org/x/tools/go/analysis"
"honnef.co/go/tools/analysis/lint"
)
// directives implements the Directives analyzer by parsing linter
// directive comments out of the package's files.
func directives(pass *analysis.Pass) (interface{}, error) {
	return lint.ParseDirectives(pass.Files, pass.Fset), nil
}

// Directives is an analyzer that extracts linter directives from the
// files being checked. It runs despite type errors so that directives
// are available even for broken packages.
var Directives = &analysis.Analyzer{
	Name:             "directives",
	Doc:              "extracts linter directives",
	Run:              directives,
	RunDespiteErrors: true,
	ResultType:       reflect.TypeOf([]lint.Directive{}),
}

View File

@@ -0,0 +1,97 @@
package facts
import (
"bufio"
"bytes"
"io"
"os"
"reflect"
"strings"
"golang.org/x/tools/go/analysis"
)
type Generator int
// A list of known generators we can detect
const (
Unknown Generator = iota
Goyacc
Cgo
Stringer
ProtocGenGo
)
var (
// used by cgo before Go 1.11
oldCgo = []byte("// Created by cgo - DO NOT EDIT")
prefix = []byte("// Code generated ")
suffix = []byte(" DO NOT EDIT.")
nl = []byte("\n")
crnl = []byte("\r\n")
)
func isGenerated(path string) (Generator, bool) {
f, err := os.Open(path)
if err != nil {
return 0, false
}
defer f.Close()
br := bufio.NewReader(f)
for {
s, err := br.ReadBytes('\n')
if err != nil && err != io.EOF {
return 0, false
}
s = bytes.TrimSuffix(s, crnl)
s = bytes.TrimSuffix(s, nl)
if bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) {
if len(s)-len(suffix) < len(prefix) {
return Unknown, true
}
text := string(s[len(prefix) : len(s)-len(suffix)])
switch text {
case "by goyacc.":
return Goyacc, true
case "by cmd/cgo;":
return Cgo, true
case "by protoc-gen-go.":
return ProtocGenGo, true
}
if strings.HasPrefix(text, `by "stringer `) {
return Stringer, true
}
if strings.HasPrefix(text, `by goyacc `) {
return Goyacc, true
}
return Unknown, true
}
if bytes.Equal(s, oldCgo) {
return Cgo, true
}
if err == io.EOF {
break
}
}
return 0, false
}
// Generated is an analyzer that maps on-disk file names to the
// generator that produced them, for files carrying a code-generation
// marker comment.
var Generated = &analysis.Analyzer{
	Name: "isgenerated",
	Doc:  "annotate file names that have been code generated",
	Run: func(pass *analysis.Pass) (interface{}, error) {
		m := map[string]Generator{}
		for _, f := range pass.Files {
			// PositionFor with adjusted=false ignores //line
			// directives, so we inspect the actual file on disk.
			path := pass.Fset.PositionFor(f.Pos(), false).Filename
			g, ok := isGenerated(path)
			if ok {
				m[path] = g
			}
		}
		return m, nil
	},
	RunDespiteErrors: true,
	ResultType:       reflect.TypeOf(map[string]Generator{}),
}

View File

@@ -0,0 +1,251 @@
package nilness
import (
"fmt"
"go/token"
"go/types"
"reflect"
"honnef.co/go/tools/go/ir"
"honnef.co/go/tools/go/types/typeutil"
"honnef.co/go/tools/internal/passes/buildir"
"golang.org/x/tools/go/analysis"
)
// neverReturnsNilFact denotes that a function's return value will never
// be nil (typed or untyped). The analysis errs on the side of false
// negatives.
type neverReturnsNilFact struct {
	Rets []neverNilness
}

func (*neverReturnsNilFact) AFact() {}
func (fact *neverReturnsNilFact) String() string {
	return fmt.Sprintf("never returns nil: %v", fact.Rets)
}

// Result is the result type of Analysis. It maps functions to the
// nilness of each of their return values.
type Result struct {
	m map[*types.Func][]neverNilness
}

// Analysis annotates return values that will never be nil.
var Analysis = &analysis.Analyzer{
	Name:       "nilness",
	Doc:        "Annotates return values that will never be nil (typed or untyped)",
	Run:        run,
	Requires:   []*analysis.Analyzer{buildir.Analyzer},
	FactTypes:  []analysis.Fact{(*neverReturnsNilFact)(nil)},
	ResultType: reflect.TypeOf((*Result)(nil)),
}

// MayReturnNil reports whether the ret's return value of fn might be
// a typed or untyped nil value. The value of ret is zero-based. When
// globalOnly is true, the only possible nil values are global
// variables.
//
// The analysis has false positives: MayReturnNil can incorrectly
// report true, but never incorrectly reports false.
func (r *Result) MayReturnNil(fn *types.Func, ret int) (yes bool, globalOnly bool) {
	// Values that aren't pointer-like can never be nil.
	if !typeutil.IsPointerLike(fn.Type().(*types.Signature).Results().At(ret).Type()) {
		return false, false
	}
	// No fact recorded for fn: conservatively assume nil is possible.
	if len(r.m[fn]) == 0 {
		return true, false
	}

	v := r.m[fn][ret]
	return v != neverNil, v == onlyGlobal
}

// run analyzes every function in the package and collects the
// recorded facts into a Result.
func run(pass *analysis.Pass) (interface{}, error) {
	seen := map[*ir.Function]struct{}{}
	out := &Result{
		m: map[*types.Func][]neverNilness{},
	}
	for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
		impl(pass, fn, seen)
	}
	for _, fact := range pass.AllObjectFacts() {
		out.m[fact.Object.(*types.Func)] = fact.Fact.(*neverReturnsNilFact).Rets
	}
	return out, nil
}
// neverNilness classifies how nil a value can be, ordered from most
// to least certain: never nil, nil only via globals, possibly nil.
type neverNilness uint8

const (
	neverNil   neverNilness = 1
	onlyGlobal neverNilness = 2
	nilly      neverNilness = 3
)

// String returns a short human-readable label for n.
func (n neverNilness) String() string {
	switch n {
	case nilly:
		return "nil"
	case onlyGlobal:
		return "global"
	case neverNil:
		return "never"
	default:
		return "BUG"
	}
}
// impl computes the nilness of fn's return values, exporting a fact
// for functions that provably never return nil. seenFns breaks
// recursion between mutually recursive functions. It returns nil when
// nothing could be determined (closures, external functions, facts
// from other packages that were never recorded).
func impl(pass *analysis.Pass, fn *ir.Function, seenFns map[*ir.Function]struct{}) []neverNilness {
	if fn.Object() == nil {
		// TODO(dh): support closures
		return nil
	}
	if fact := new(neverReturnsNilFact); pass.ImportObjectFact(fn.Object(), fact) {
		return fact.Rets
	}
	if fn.Pkg != pass.ResultOf[buildir.Analyzer].(*buildir.IR).Pkg {
		// Function from another package without a recorded fact.
		return nil
	}
	if fn.Blocks == nil {
		// External function without a body.
		return nil
	}
	if _, ok := seenFns[fn]; ok {
		// break recursion
		return nil
	}

	seenFns[fn] = struct{}{}

	seen := map[ir.Value]struct{}{}

	// mightReturnNil walks backwards through the IR value graph,
	// computing the most pessimistic nilness along each path.
	var mightReturnNil func(v ir.Value) neverNilness
	mightReturnNil = func(v ir.Value) neverNilness {
		if _, ok := seen[v]; ok {
			// break cycle
			return nilly
		}
		if !typeutil.IsPointerLike(v.Type()) {
			return neverNil
		}
		seen[v] = struct{}{}
		switch v := v.(type) {
		case *ir.MakeInterface:
			return mightReturnNil(v.X)
		case *ir.Convert:
			return mightReturnNil(v.X)
		case *ir.SliceToArrayPointer:
			if typeutil.CoreType(v.Type()).(*types.Pointer).Elem().Underlying().(*types.Array).Len() == 0 {
				return mightReturnNil(v.X)
			} else {
				// converting a slice to an array pointer of length > 0 panics if the slice is nil
				return neverNil
			}
		case *ir.Slice:
			return mightReturnNil(v.X)
		case *ir.Phi:
			// A phi is as nilly as its most nilly edge.
			ret := neverNil
			for _, e := range v.Edges {
				if n := mightReturnNil(e); n > ret {
					ret = n
				}
			}
			return ret
		case *ir.Extract:
			switch d := v.Tuple.(type) {
			case *ir.Call:
				if callee := d.Call.StaticCallee(); callee != nil {
					ret := impl(pass, callee, seenFns)
					if len(ret) == 0 {
						return nilly
					}
					return ret[v.Index]
				} else {
					return nilly
				}
			case *ir.TypeAssert, *ir.Next, *ir.Select, *ir.MapLookup, *ir.TypeSwitch, *ir.Recv, *ir.Sigma:
				// we don't need to look at the Extract's index
				// because we've already checked its type.
				return nilly
			default:
				panic(fmt.Sprintf("internal error: unhandled type %T", d))
			}
		case *ir.Call:
			if callee := v.Call.StaticCallee(); callee != nil {
				ret := impl(pass, callee, seenFns)
				if len(ret) == 0 {
					return nilly
				}
				return ret[0]
			} else {
				return nilly
			}
		case *ir.BinOp, *ir.UnOp, *ir.Alloc, *ir.FieldAddr, *ir.IndexAddr, *ir.Global, *ir.MakeSlice, *ir.MakeClosure, *ir.Function, *ir.MakeMap, *ir.MakeChan:
			return neverNil
		case *ir.Sigma:
			// A sigma node constrains a value based on the branch
			// taken; use a nil comparison in the controlling If to
			// refine the result.
			iff, ok := v.From.Control().(*ir.If)
			if !ok {
				return nilly
			}
			binop, ok := iff.Cond.(*ir.BinOp)
			if !ok {
				return nilly
			}
			isNil := func(v ir.Value) bool {
				k, ok := v.(*ir.Const)
				if !ok {
					return false
				}
				return k.Value == nil
			}
			if binop.X == v.X && isNil(binop.Y) || binop.Y == v.X && isNil(binop.X) {
				op := binop.Op
				if v.From.Succs[0] != v.Block() {
					// we're in the false branch, negate op
					switch op {
					case token.EQL:
						op = token.NEQ
					case token.NEQ:
						op = token.EQL
					default:
						panic(fmt.Sprintf("internal error: unhandled token %v", op))
					}
				}
				switch op {
				case token.EQL:
					return nilly
				case token.NEQ:
					return neverNil
				default:
					panic(fmt.Sprintf("internal error: unhandled token %v", op))
				}
			}
			return nilly
		case *ir.ChangeType:
			return mightReturnNil(v.X)
		case *ir.Load:
			if _, ok := v.X.(*ir.Global); ok {
				return onlyGlobal
			}
			return nilly
		case *ir.AggregateConst:
			return neverNil
		case *ir.TypeAssert, *ir.ChangeInterface, *ir.Field, *ir.Const, *ir.GenericConst, *ir.Index, *ir.MapLookup, *ir.Parameter, *ir.Recv, *ir.TypeSwitch:
			return nilly
		default:
			panic(fmt.Sprintf("internal error: unhandled type %T", v))
		}
	}
	ret := fn.Exit.Control().(*ir.Return)
	out := make([]neverNilness, len(ret.Results))
	export := false
	for i, v := range ret.Results {
		v := mightReturnNil(v)
		out[i] = v
		if v != nilly && typeutil.IsPointerLike(fn.Signature.Results().At(i).Type()) {
			export = true
		}
	}
	if export {
		pass.ExportObjectFact(fn.Object(), &neverReturnsNilFact{out})
	}
	return out
}

View File

@@ -0,0 +1,178 @@
package facts
import (
"go/types"
"reflect"
"honnef.co/go/tools/go/ir"
"honnef.co/go/tools/go/ir/irutil"
"honnef.co/go/tools/internal/passes/buildir"
"golang.org/x/tools/go/analysis"
)
// IsPure is a fact recording that a function is pure, i.e. free of
// side effects as determined by the Purity analyzer.
type IsPure struct{}

func (*IsPure) AFact()           {}
func (d *IsPure) String() string { return "is pure" }

// PurityResult maps pure functions to their IsPure fact.
type PurityResult map[*types.Func]*IsPure

// Purity is an analyzer that marks pure functions.
var Purity = &analysis.Analyzer{
	Name:       "fact_purity",
	Doc:        "Mark pure functions",
	Run:        purity,
	Requires:   []*analysis.Analyzer{buildir.Analyzer},
	FactTypes:  []analysis.Fact{(*IsPure)(nil)},
	ResultType: reflect.TypeOf(PurityResult{}),
}

// pureStdlib lists standard-library functions that are considered
// pure regardless of what static analysis of their bodies would
// conclude.
var pureStdlib = map[string]struct{}{
	"errors.New":                      {},
	"fmt.Errorf":                      {},
	"fmt.Sprintf":                     {},
	"fmt.Sprint":                      {},
	"sort.Reverse":                    {},
	"strings.Map":                     {},
	"strings.Repeat":                  {},
	"strings.Replace":                 {},
	"strings.Title":                   {},
	"strings.ToLower":                 {},
	"strings.ToLowerSpecial":          {},
	"strings.ToTitle":                 {},
	"strings.ToTitleSpecial":          {},
	"strings.ToUpper":                 {},
	"strings.ToUpperSpecial":          {},
	"strings.Trim":                    {},
	"strings.TrimFunc":                {},
	"strings.TrimLeft":                {},
	"strings.TrimLeftFunc":            {},
	"strings.TrimPrefix":              {},
	"strings.TrimRight":               {},
	"strings.TrimRightFunc":           {},
	"strings.TrimSpace":               {},
	"strings.TrimSuffix":              {},
	"(*net/http.Request).WithContext": {},
}
// purity implements the Purity analyzer. It visits every function in
// the package and exports an IsPure fact for each function it can
// prove pure. The analysis is conservative: a function counts as pure
// only if it is on the pureStdlib allowlist, or if every instruction
// in its body is free of side effects and memory access.
func purity(pass *analysis.Pass) (interface{}, error) {
	// seen breaks recursion for (mutually) recursive functions.
	seen := map[*ir.Function]struct{}{}
	irpkg := pass.ResultOf[buildir.Analyzer].(*buildir.IR).Pkg
	var check func(fn *ir.Function) (ret bool)
	check = func(fn *ir.Function) (ret bool) {
		if fn.Object() == nil {
			// TODO(dh): support closures
			return false
		}
		if pass.ImportObjectFact(fn.Object(), new(IsPure)) {
			// Already known to be pure (memoized fact).
			return true
		}
		if fn.Pkg != irpkg {
			// Function is in another package but wasn't marked as
			// pure, ergo it isn't pure
			return false
		}
		// Break recursion
		if _, ok := seen[fn]; ok {
			return false
		}
		seen[fn] = struct{}{}
		// Export the fact on success, regardless of which return path
		// below concludes purity.
		defer func() {
			if ret {
				pass.ExportObjectFact(fn.Object(), &IsPure{})
			}
		}()
		if irutil.IsStub(fn) {
			return false
		}
		if _, ok := pureStdlib[fn.Object().(*types.Func).FullName()]; ok {
			return true
		}
		if fn.Signature.Results().Len() == 0 {
			// A function with no return values is empty or is doing some
			// work we cannot see (for example because of build tags);
			// don't consider it pure.
			return false
		}
		for _, param := range fn.Params {
			// TODO(dh): this may not be strictly correct. pure code
			// can, to an extent, operate on non-basic types.
			if _, ok := param.Type().Underlying().(*types.Basic); !ok {
				return false
			}
		}
		// Don't consider external functions pure.
		if fn.Blocks == nil {
			return false
		}
		// checkCall reports whether a call is compatible with purity:
		// a len/cap builtin, self-recursion, or a static callee that
		// is itself (recursively) pure.
		checkCall := func(common *ir.CallCommon) bool {
			if common.IsInvoke() {
				return false
			}
			builtin, ok := common.Value.(*ir.Builtin)
			if !ok {
				if common.StaticCallee() != fn {
					if common.StaticCallee() == nil {
						return false
					}
					if !check(common.StaticCallee()) {
						return false
					}
				}
			} else {
				switch builtin.Name() {
				case "len", "cap":
				default:
					return false
				}
			}
			return true
		}
		// Reject any instruction with observable side effects or
		// memory access (stores, allocations, loads, channel ops,
		// goroutines, panics).
		for _, b := range fn.Blocks {
			for _, ins := range b.Instrs {
				switch ins := ins.(type) {
				case *ir.Call:
					if !checkCall(ins.Common()) {
						return false
					}
				case *ir.Defer:
					if !checkCall(&ins.Call) {
						return false
					}
				case *ir.Select:
					return false
				case *ir.Send:
					return false
				case *ir.Go:
					return false
				case *ir.Panic:
					return false
				case *ir.Store:
					return false
				case *ir.FieldAddr:
					return false
				case *ir.Alloc:
					return false
				case *ir.Load:
					return false
				}
			}
		}
		return true
	}
	for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
		check(fn)
	}
	// Collect all known facts into the result map.
	out := PurityResult{}
	for _, fact := range pass.AllObjectFacts() {
		out[fact.Object.(*types.Func)] = fact.Fact.(*IsPure)
	}
	return out, nil
}

View File

@@ -0,0 +1,24 @@
package facts
import (
"go/ast"
"go/token"
"reflect"
"golang.org/x/tools/go/analysis"
)
// TokenFile computes, for every file in the package, the mapping from
// its *token.File in the file set to its parsed *ast.File.
var TokenFile = &analysis.Analyzer{
	Name: "tokenfileanalyzer",
	Doc:  "creates a mapping of *token.File to *ast.File",
	Run: func(pass *analysis.Pass) (interface{}, error) {
		mapping := make(map[*token.File]*ast.File, len(pass.Files))
		for _, astFile := range pass.Files {
			mapping[pass.Fset.File(astFile.Pos())] = astFile
		}
		return mapping, nil
	},
	RunDespiteErrors: true,
	ResultType:       reflect.TypeOf(map[*token.File]*ast.File{}),
}

View File

@@ -0,0 +1,253 @@
package typedness
import (
"fmt"
"go/token"
"go/types"
"reflect"
"honnef.co/go/tools/go/ir"
"honnef.co/go/tools/go/ir/irutil"
"honnef.co/go/tools/internal/passes/buildir"
"golang.org/x/exp/typeparams"
"golang.org/x/tools/go/analysis"
)
// alwaysTypedFact denotes that a function's return value will never
// be untyped nil. The analysis errs on the side of false negatives.
type alwaysTypedFact struct {
	// Rets is a bitmask: bit i is set when the i'th return value is
	// always a typed value.
	Rets uint8
}

// AFact marks alwaysTypedFact as an analysis.Fact.
func (*alwaysTypedFact) AFact() {}

func (fact *alwaysTypedFact) String() string {
	return fmt.Sprintf("always typed: %08b", fact.Rets)
}

// Result is the result type of the typedness analyzer. It maps
// functions to the bitmask of their always-typed return values.
type Result struct {
	m map[*types.Func]uint8
}

// Analysis annotates functions whose interface return values always
// hold typed (never untyped-nil) values.
var Analysis = &analysis.Analyzer{
	Name:       "typedness",
	Doc:        "Annotates return values that are always typed values",
	Run:        run,
	Requires:   []*analysis.Analyzer{buildir.Analyzer},
	FactTypes:  []analysis.Fact{(*alwaysTypedFact)(nil)},
	ResultType: reflect.TypeOf((*Result)(nil)),
}
// MustReturnTyped reports whether the ret's return value of fn must
// be a typed value, i.e. an interface value containing a concrete
// type or trivially a concrete type. The value of ret is zero-based.
//
// The analysis has false negatives: MustReturnTyped may incorrectly
// report false, but never incorrectly reports true.
func (r *Result) MustReturnTyped(fn *types.Func, ret int) bool {
	retType := fn.Type().(*types.Signature).Results().At(ret).Type()
	if _, isInterface := retType.Underlying().(*types.Interface); !isInterface {
		// Non-interface return values are trivially typed.
		return true
	}
	return r.m[fn]&(1<<ret) != 0
}
// run analyzes every source function in the package and collects the
// resulting typedness facts into a Result.
func run(pass *analysis.Pass) (interface{}, error) {
	visited := map[*ir.Function]struct{}{}
	result := &Result{m: map[*types.Func]uint8{}}
	for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
		impl(pass, fn, visited)
	}
	for _, fact := range pass.AllObjectFacts() {
		result.m[fact.Object.(*types.Func)] = fact.Fact.(*alwaysTypedFact).Rets
	}
	return result, nil
}
// impl computes the typedness bitmask for fn's return values: bit i
// is set when return value i is always a typed (never untyped-nil)
// interface value. Successful results are memoized as
// alwaysTypedFact facts; seenFns breaks recursion between mutually
// recursive functions.
func impl(pass *analysis.Pass, fn *ir.Function, seenFns map[*ir.Function]struct{}) (out uint8) {
	if fn.Signature.Results().Len() > 8 {
		// out is a uint8 bitmask; give up on functions with more than
		// eight return values.
		return 0
	}
	if fn.Object() == nil {
		// TODO(dh): support closures
		return 0
	}
	if fact := new(alwaysTypedFact); pass.ImportObjectFact(fn.Object(), fact) {
		// Memoized result from an earlier call or another package.
		return fact.Rets
	}
	if fn.Pkg != pass.ResultOf[buildir.Analyzer].(*buildir.IR).Pkg {
		// Function from another package with no exported fact; we
		// cannot analyze it here.
		return 0
	}
	if fn.Blocks == nil {
		// External function without a body.
		return 0
	}
	if irutil.IsStub(fn) {
		return 0
	}
	if _, ok := seenFns[fn]; ok {
		// break recursion
		return 0
	}

	seenFns[fn] = struct{}{}
	defer func() {
		for i := 0; i < fn.Signature.Results().Len(); i++ {
			if _, ok := fn.Signature.Results().At(i).Type().Underlying().(*types.Interface); !ok {
				// we don't need facts to know that non-interface
				// types can't be untyped nil. zeroing out those bits
				// may result in all bits being zero, in which case we
				// don't have to save any fact.
				out &= ^(1 << i)
			}
		}
		if out > 0 {
			pass.ExportObjectFact(fn.Object(), &alwaysTypedFact{out})
		}
	}()

	// isUntypedNil reports whether v is the nil constant of an
	// interface type, i.e. an untyped nil.
	isUntypedNil := func(v ir.Value) bool {
		k, ok := v.(*ir.Const)
		if !ok {
			return false
		}
		if _, ok := k.Type().Underlying().(*types.Interface); !ok {
			return false
		}
		return k.Value == nil
	}

	// do reports whether v is definitely a typed interface value.
	// seen breaks cycles through phi nodes.
	var do func(v ir.Value, seen map[ir.Value]struct{}) bool
	do = func(v ir.Value, seen map[ir.Value]struct{}) bool {
		if _, ok := seen[v]; ok {
			// break cycle
			return false
		}
		seen[v] = struct{}{}
		switch v := v.(type) {
		case *ir.Const:
			// can't be a typed nil, because then we'd be returning the
			// result of MakeInterface.
			return false
		case *ir.ChangeInterface:
			return do(v.X, seen)
		case *ir.Extract:
			call, ok := v.Tuple.(*ir.Call)
			if !ok {
				// We only care about extracts of function results. For
				// everything else (e.g. channel receives and map
				// lookups), we can either not deduce any information, or
				// will see a MakeInterface.
				return false
			}
			if callee := call.Call.StaticCallee(); callee != nil {
				return impl(pass, callee, seenFns)&(1<<v.Index) != 0
			} else {
				// we don't know what function we're calling. no need
				// to look at the signature, though. if it weren't an
				// interface, we'd be seeing a MakeInterface
				// instruction.
				return false
			}
		case *ir.Call:
			if callee := v.Call.StaticCallee(); callee != nil {
				return impl(pass, callee, seenFns)&1 != 0
			} else {
				// we don't know what function we're calling. no need
				// to look at the signature, though. if it weren't an
				// interface, we'd be seeing a MakeInterface
				// instruction.
				return false
			}
		case *ir.Sigma:
			// A sigma node carries information from a branch
			// condition. If the branch compared v.X against untyped
			// nil, the branch we are in tells us whether v.X is nil.
			iff, ok := v.From.Control().(*ir.If)
			if !ok {
				// give up
				return false
			}
			binop, ok := iff.Cond.(*ir.BinOp)
			if !ok {
				// give up
				return false
			}
			if (binop.X == v.X && isUntypedNil(binop.Y)) || (isUntypedNil(binop.X) && binop.Y == v.X) {
				op := binop.Op
				if v.From.Succs[0] != v.Block() {
					// we're in the false branch, negate op
					switch op {
					case token.EQL:
						op = token.NEQ
					case token.NEQ:
						op = token.EQL
					default:
						panic(fmt.Sprintf("internal error: unhandled token %v", op))
					}
				}
				switch op {
				case token.EQL:
					// returned value equals untyped nil
					return false
				case token.NEQ:
					// returned value does not equal untyped nil
					return true
				default:
					panic(fmt.Sprintf("internal error: unhandled token %v", op))
				}
			}
			// TODO(dh): handle comparison with typed nil
			// give up
			return false
		case *ir.Phi:
			// A phi is typed only if all incoming edges are.
			for _, pv := range v.Edges {
				if !do(pv, seen) {
					return false
				}
			}
			return true
		case *ir.MakeInterface:
			terms, err := typeparams.NormalTerms(v.X.Type())
			if len(terms) == 0 || err != nil {
				// Type is a type parameter with no type terms (or we couldn't determine the terms). Such a type
				// _can_ be nil when put in an interface value.
				//
				// There is no instruction that can create a guaranteed non-nil instance of a type parameter without
				// type constraints, so we return false right away, without checking v.X's typedness.
				return false
			}
			return true
		case *ir.TypeAssert:
			// type assertions fail for untyped nils. Either we have a
			// single lhs and the type assertion succeeds or panics,
			// or we have two lhs and we'll return Extract instead.
			return true
		case *ir.ChangeType:
			// we'll only see interface->interface conversions, which
			// don't tell us anything about the nilness.
			return false
		case *ir.MapLookup, *ir.Index, *ir.Recv, *ir.Parameter, *ir.Load, *ir.Field:
			// All other instructions that tell us nothing about the
			// typedness of interface values.
			return false
		default:
			panic(fmt.Sprintf("internal error: unhandled type %T", v))
		}
	}

	// Inspect each returned value of interface type (excluding type
	// parameters) and record the always-typed ones in the bitmask.
	ret := fn.Exit.Control().(*ir.Return)
	for i, v := range ret.Results {
		typ := fn.Signature.Results().At(i).Type()
		if _, ok := typ.Underlying().(*types.Interface); ok && !typeparams.IsTypeParam(typ) {
			if do(v, map[ir.Value]struct{}{}) {
				out |= 1 << i
			}
		}
	}
	return out
}

View File

@@ -0,0 +1,283 @@
// Package lint provides abstractions on top of go/analysis.
// These abstractions add extra information to analyzes, such as structured documentation and severities.
package lint
import (
"flag"
"fmt"
"go/ast"
"go/build"
"go/token"
"strconv"
"strings"
"golang.org/x/tools/go/analysis"
)
// Analyzer wraps a go/analysis.Analyzer and provides structured documentation.
type Analyzer struct {
	// The analyzer's documentation. Unlike go/analysis.Analyzer.Doc,
	// this field is structured, providing access to severity, options
	// etc.
	Doc *Documentation

	// Analyzer is the wrapped go/analysis analyzer.
	Analyzer *analysis.Analyzer
}
// initialize copies the structured documentation into the wrapped
// analyzer's Doc field and, if the analyzer has no flag set yet,
// installs a default one with a -go target-version flag.
func (a *Analyzer) initialize() {
	a.Analyzer.Doc = a.Doc.String()
	if a.Analyzer.Flags.Usage != nil {
		return
	}
	flags := flag.NewFlagSet("", flag.PanicOnError)
	flags.Var(newVersionFlag(), "go", "Target Go version")
	a.Analyzer.Flags = *flags
}
// InitializeAnalyzers takes a map of documentation and a map of go/analysis.Analyzers and returns a slice of Analyzers.
// The map keys are the analyzer names.
func InitializeAnalyzers(docs map[string]*Documentation, analyzers map[string]*analysis.Analyzer) []*Analyzer {
	wrapped := make([]*Analyzer, 0, len(analyzers))
	for name, analyzer := range analyzers {
		analyzer.Name = name
		wrapper := &Analyzer{
			Doc:      docs[name],
			Analyzer: analyzer,
		}
		wrapper.initialize()
		wrapped = append(wrapped, wrapper)
	}
	return wrapped
}
// Severity describes the severity of diagnostics reported by an analyzer.
type Severity int

const (
	// SeverityNone is the zero value; no severity was assigned.
	SeverityNone Severity = iota
	SeverityError
	SeverityDeprecated
	SeverityWarning
	SeverityInfo
	SeverityHint
)

// MergeStrategy sets how merge mode should behave for diagnostics of an analyzer.
type MergeStrategy int

const (
	// NOTE(review): merge semantics are implemented elsewhere; the
	// names suggest "merge when reported in any / all build
	// configurations" — confirm against the lintcmd merger.
	MergeIfAny MergeStrategy = iota
	MergeIfAll
)

// RawDocumentation is the unprocessed documentation of a check. Title
// and Text may contain the custom \' and \" quoting markers consumed
// by Markdownify.
type RawDocumentation struct {
	Title      string
	Text       string
	Before     string   // example code before applying the check's suggestion
	After      string   // example code after applying the check's suggestion
	Since      string   // first release the check shipped in; empty means unreleased
	NonDefault bool     // true if the check is not enabled by default
	Options    []string // names of configuration options the check reads
	Severity   Severity
	MergeIf    MergeStrategy
}

// Documentation is the processed form of RawDocumentation, carrying
// both plain-text and Markdown renderings of the title and text.
type Documentation struct {
	Title string
	Text  string

	TitleMarkdown string
	TextMarkdown  string

	Before     string
	After      string
	Since      string
	NonDefault bool
	Options    []string
	Severity   Severity
	MergeIf    MergeStrategy
}
// Markdownify converts raw documentation into Documentation values,
// producing both plain-text and Markdown renderings of each entry's
// title and text.
func Markdownify(m map[string]*RawDocumentation) map[string]*Documentation {
	docs := make(map[string]*Documentation, len(m))
	for name, raw := range m {
		docs[name] = &Documentation{
			Title:         strings.TrimSpace(stripMarkdown(raw.Title)),
			Text:          strings.TrimSpace(stripMarkdown(raw.Text)),
			TitleMarkdown: strings.TrimSpace(toMarkdown(raw.Title)),
			TextMarkdown:  strings.TrimSpace(toMarkdown(raw.Text)),

			Before:     strings.TrimSpace(raw.Before),
			After:      strings.TrimSpace(raw.After),
			Since:      raw.Since,
			NonDefault: raw.NonDefault,
			Options:    raw.Options,
			Severity:   raw.Severity,
			MergeIf:    raw.MergeIf,
		}
	}
	return docs
}
// toMarkdown rewrites the custom \' and \" quoting markers as
// Markdown backticks.
func toMarkdown(s string) string {
	r := strings.NewReplacer(`\'`, "`", `\"`, "`")
	return r.Replace(s)
}

// stripMarkdown removes the custom quoting markers for plain-text
// output: \' is dropped and \" becomes an apostrophe.
func stripMarkdown(s string) string {
	r := strings.NewReplacer(`\'`, "", `\"`, "'")
	return r.Replace(s)
}
// Format renders the documentation as plain text. If metadata is
// true, availability and option information is appended.
func (doc *Documentation) Format(metadata bool) string {
	return doc.format(false, metadata)
}

// FormatMarkdown renders the documentation as Markdown. If metadata
// is true, availability and option information is appended.
func (doc *Documentation) FormatMarkdown(metadata bool) string {
	return doc.format(true, metadata)
}
// format renders the documentation. markdown selects the Markdown
// renderings of title and text; metadata appends the "Available
// since" and "Options" sections.
func (doc *Documentation) format(markdown bool, metadata bool) string {
	b := &strings.Builder{}
	if markdown {
		fmt.Fprintf(b, "%s\n\n", doc.TitleMarkdown)
		if doc.Text != "" {
			fmt.Fprintf(b, "%s\n\n", doc.TextMarkdown)
		}
	} else {
		fmt.Fprintf(b, "%s\n\n", doc.Title)
		if doc.Text != "" {
			fmt.Fprintf(b, "%s\n\n", doc.Text)
		}
	}

	// Render the Before/After example sections, indenting every line.
	if doc.Before != "" {
		fmt.Fprintln(b, "Before:")
		fmt.Fprintln(b, "")
		for _, line := range strings.Split(doc.Before, "\n") {
			fmt.Fprint(b, " ", line, "\n")
		}
		fmt.Fprintln(b, "")
		fmt.Fprintln(b, "After:")
		fmt.Fprintln(b, "")
		for _, line := range strings.Split(doc.After, "\n") {
			fmt.Fprint(b, " ", line, "\n")
		}
		fmt.Fprintln(b, "")
	}

	if metadata {
		fmt.Fprint(b, "Available since\n ")
		if doc.Since == "" {
			fmt.Fprint(b, "unreleased")
		} else {
			fmt.Fprintf(b, "%s", doc.Since)
		}
		if doc.NonDefault {
			fmt.Fprint(b, ", non-default")
		}
		fmt.Fprint(b, "\n")
		if len(doc.Options) > 0 {
			// NOTE(review): no format verbs in the next two Fprintf
			// calls; Fprint would do (staticcheck S1039).
			fmt.Fprintf(b, "\nOptions\n")
			for _, opt := range doc.Options {
				fmt.Fprintf(b, " %s", opt)
			}
			fmt.Fprint(b, "\n")
		}
	}

	return b.String()
}
// String implements fmt.Stringer, rendering the documentation as
// plain text including metadata.
func (doc *Documentation) String() string {
	return doc.Format(true)
}
// newVersionFlag returns a VersionFlag initialized to the newest Go
// release supported by the current toolchain, taken from
// build.Default.ReleaseTags (whose last entry has the form "go1.N").
func newVersionFlag() flag.Getter {
	tags := build.Default.ReleaseTags
	latest := tags[len(tags)-1][2:] // strip the "go" prefix
	vf := new(VersionFlag)
	if err := vf.Set(latest); err != nil {
		panic(fmt.Sprintf("internal error: %s", err))
	}
	return vf
}
// VersionFlag is a flag.Getter holding a Go minor version, e.g. 17
// for Go 1.17.
type VersionFlag int

func (v *VersionFlag) String() string {
	return fmt.Sprintf("1.%d", *v)
}

// Set parses a version string of the form "1.N".
func (v *VersionFlag) Set(s string) error {
	const invalid = "invalid Go version: %q"
	switch {
	case len(s) < 3:
		return fmt.Errorf(invalid, s)
	case s[0] != '1':
		return fmt.Errorf(invalid, s)
	case s[1] != '.':
		return fmt.Errorf(invalid, s)
	}
	minor, err := strconv.Atoi(s[2:])
	if err != nil {
		return fmt.Errorf(invalid, s)
	}
	*v = VersionFlag(minor)
	return nil
}

// Get implements flag.Getter, returning the minor version as an int.
func (v *VersionFlag) Get() interface{} {
	return int(*v)
}
// ExhaustiveTypeSwitch panics when called. It can be used to ensure
// that type switches are exhaustive.
func ExhaustiveTypeSwitch(v interface{}) {
	panic(fmt.Sprintf("internal error: unhandled case %T", v))
}

// A directive is a comment of the form '//lint:<command>
// [arguments...]'. It represents instructions to the static analysis
// tool.
type Directive struct {
	Command   string       // the directive's command, e.g. "ignore"
	Arguments []string     // space-separated arguments after the command
	Directive *ast.Comment // the comment carrying the directive
	Node      ast.Node     // the AST node the comment is associated with
}
// parseDirective splits a '//lint:' comment into its command and
// arguments. It returns ("", nil) for comments that are not lint
// directives.
func parseDirective(s string) (cmd string, args []string) {
	const prefix = "//lint:"
	if !strings.HasPrefix(s, prefix) {
		return "", nil
	}
	parts := strings.Split(s[len(prefix):], " ")
	return parts[0], parts[1:]
}
// ParseDirectives extracts all directives from a list of Go files.
func ParseDirectives(files []*ast.File, fset *token.FileSet) []Directive {
	var out []Directive
	for _, file := range files {
		// OPT(dh): in our old code, we skip all the comment map work if we
		// couldn't find any directives, benchmark if that's actually
		// worth doing
		cm := ast.NewCommentMap(fset, file, file.Comments)
		for node, groups := range cm {
			for _, group := range groups {
				for _, comment := range group.List {
					if !strings.HasPrefix(comment.Text, "//lint:") {
						continue
					}
					cmd, args := parseDirective(comment.Text)
					out = append(out, Directive{
						Command:   cmd,
						Arguments: args,
						Directive: comment,
						Node:      node,
					})
				}
			}
		}
	}
	return out
}

View File

@@ -0,0 +1,247 @@
package report
import (
"bytes"
"fmt"
"go/ast"
"go/format"
"go/token"
"path/filepath"
"strconv"
"strings"
"honnef.co/go/tools/analysis/facts"
"honnef.co/go/tools/go/ast/astutil"
"golang.org/x/tools/go/analysis"
)
// Options control how a diagnostic is reported.
type Options struct {
	ShortRange      bool // use the node's short range (excluding its body) as the position
	FilterGenerated bool // suppress the diagnostic in generated files
	Fixes           []analysis.SuggestedFix
	Related         []analysis.RelatedInformation
}
// Option configures the reporting of a single diagnostic.
type Option func(*Options)

// ShortRange makes the diagnostic use the node's short range, which
// excludes the node's body.
func ShortRange() Option {
	return func(opts *Options) { opts.ShortRange = true }
}

// FilterGenerated suppresses the diagnostic when it would be reported
// in a generated file.
func FilterGenerated() Option {
	return func(opts *Options) { opts.FilterGenerated = true }
}

// Fixes attaches suggested fixes to the diagnostic.
func Fixes(fixes ...analysis.SuggestedFix) Option {
	return func(opts *Options) { opts.Fixes = append(opts.Fixes, fixes...) }
}

// Related attaches related information for node to the diagnostic.
// Nodes without a valid position are silently skipped.
func Related(node Positioner, message string) Option {
	return func(opts *Options) {
		pos, end, ok := getRange(node, opts.ShortRange)
		if !ok {
			return
		}
		opts.Related = append(opts.Related, analysis.RelatedInformation{
			Pos:     pos,
			End:     end,
			Message: message,
		})
	}
}
// Positioner is the minimal interface for values that have a source
// position.
type Positioner interface {
	Pos() token.Pos
}

// fullPositioner describes values that span a source range.
type fullPositioner interface {
	Pos() token.Pos
	End() token.Pos
}

// sourcer describes values that can report the AST node they were
// derived from.
type sourcer interface {
	Source() ast.Node
}

// shortRange returns the position and end of the main component of an
// AST node. For nodes that have no body, the short range is identical
// to the node's Pos and End. For nodes that do have a body, the short
// range excludes the body.
func shortRange(node ast.Node) (pos, end token.Pos) {
	switch node := node.(type) {
	case *ast.File:
		// Just the package clause.
		return node.Pos(), node.Name.End()
	case *ast.CaseClause:
		return node.Pos(), node.Colon + 1
	case *ast.CommClause:
		return node.Pos(), node.Colon + 1
	case *ast.DeferStmt:
		// Just the 'defer' keyword.
		return node.Pos(), node.Defer + token.Pos(len("defer"))
	case *ast.ExprStmt:
		return shortRange(node.X)
	case *ast.ForStmt:
		if node.Post != nil {
			return node.For, node.Post.End()
		} else if node.Cond != nil {
			return node.For, node.Cond.End()
		} else if node.Init != nil {
			// +1 to catch the semicolon, for gofmt'ed code
			return node.Pos(), node.Init.End() + 1
		} else {
			return node.Pos(), node.For + token.Pos(len("for"))
		}
	case *ast.FuncDecl:
		// The signature, excluding the body.
		return node.Pos(), node.Type.End()
	case *ast.FuncLit:
		return node.Pos(), node.Type.End()
	case *ast.GoStmt:
		if _, ok := astutil.Unparen(node.Call.Fun).(*ast.FuncLit); ok {
			// 'go func() { ... }': just the 'go' keyword.
			return node.Pos(), node.Go + token.Pos(len("go"))
		} else {
			return node.Pos(), node.End()
		}
	case *ast.IfStmt:
		return node.Pos(), node.Cond.End()
	case *ast.RangeStmt:
		return node.Pos(), node.X.End()
	case *ast.SelectStmt:
		return node.Pos(), node.Pos() + token.Pos(len("select"))
	case *ast.SwitchStmt:
		if node.Tag != nil {
			return node.Pos(), node.Tag.End()
		} else if node.Init != nil {
			// +1 to catch the semicolon, for gofmt'ed code
			return node.Pos(), node.Init.End() + 1
		} else {
			return node.Pos(), node.Pos() + token.Pos(len("switch"))
		}
	case *ast.TypeSwitchStmt:
		return node.Pos(), node.Assign.End()
	default:
		return node.Pos(), node.End()
	}
}
// HasRange reports whether a valid position range can be derived from
// node, for both full and short ranges.
func HasRange(node Positioner) bool {
	// we don't know if getRange will be called with shortRange set to
	// true, so make sure that both work.
	if _, _, ok := getRange(node, false); !ok {
		return false
	}
	_, _, ok := getRange(node, true)
	return ok
}

// getRange computes the position range of node. When short is true
// and the node supports it, the range excludes the node's body.
func getRange(node Positioner, short bool) (pos, end token.Pos, ok bool) {
	switch n := node.(type) {
	case sourcer:
		src := n.Source()
		if src == nil {
			return 0, 0, false
		}
		if !short {
			return src.Pos(), src.End(), true
		}
		p, e := shortRange(src)
		return p, e, true
	case fullPositioner:
		if !short {
			return n.Pos(), n.End(), true
		}
		p, e := shortRange(n)
		return p, e, true
	default:
		// Only a start position is available.
		return n.Pos(), token.NoPos, true
	}
}
// Report emits a diagnostic for node with the given message. Options
// may attach fixes and related information, shorten the reported
// range, or suppress the diagnostic in generated files. It panics if
// no valid position can be derived from node.
func Report(pass *analysis.Pass, node Positioner, message string, opts ...Option) {
	cfg := &Options{}
	for _, opt := range opts {
		opt(cfg)
	}
	file := DisplayPosition(pass.Fset, node.Pos()).Filename
	if cfg.FilterGenerated {
		// Skip diagnostics in files recorded as generated.
		m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
		if _, ok := m[file]; ok {
			return
		}
	}
	pos, end, ok := getRange(node, cfg.ShortRange)
	if !ok {
		panic(fmt.Sprintf("no valid position for reporting node %v", node))
	}
	d := analysis.Diagnostic{
		Pos:            pos,
		End:            end,
		Message:        message,
		SuggestedFixes: cfg.Fixes,
		Related:        cfg.Related,
	}
	pass.Report(d)
}
// Render formats the AST node x using the pass's file set and returns
// the result as a string. It panics when the node cannot be
// formatted.
func Render(pass *analysis.Pass, x interface{}) string {
	var out bytes.Buffer
	if err := format.Node(&out, pass.Fset, x); err != nil {
		panic(err)
	}
	return out.String()
}
// RenderArgs renders a list of call arguments as source text, joined
// by ", ".
func RenderArgs(pass *analysis.Pass, args []ast.Expr) string {
	// Pre-size the slice: the number of rendered arguments is known
	// up front, so avoid repeated growth during append.
	rendered := make([]string, 0, len(args))
	for _, arg := range args {
		rendered = append(rendered, Render(pass, arg))
	}
	return strings.Join(rendered, ", ")
}
// DisplayPosition converts p into a token.Position suitable for
// presenting to users. It prefers the //line-adjusted position, but
// only when that position points into another Go file: cgo files stay
// mapped to their original sources, while e.g. YACC grammar files are
// not pointed into.
func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position {
	if p == token.NoPos {
		return token.Position{}
	}
	raw := fset.PositionFor(p, false)
	adjusted := fset.PositionFor(p, true)
	if filepath.Ext(adjusted.Filename) != ".go" {
		return raw
	}
	return adjusted
}
// Ordinal returns the English ordinal representation of n, e.g. "1st"
// for 1 and "12th" for 12.
func Ordinal(n int) string {
	suffix := "th"
	// The teens always take "th" — not just 11–13, but 111–113,
	// 211–213, and so on in every hundred. The previous check
	// (n < 10 || n > 20) got e.g. 111 wrong ("111st"); testing
	// n % 100 handles all hundreds.
	if rem := n % 100; rem < 11 || rem > 13 {
		switch n % 10 {
		case 1:
			suffix = "st"
		case 2:
			suffix = "nd"
		case 3:
			suffix = "rd"
		}
	}
	return strconv.Itoa(n) + suffix
}

View File

@@ -0,0 +1,15 @@
# staticcheck
_staticcheck_ offers extensive analysis of Go code, covering a myriad
of categories. It will detect bugs, suggest code simplifications,
point out dead code, and more.
## Installation
See [the main README](https://github.com/dominikh/go-tools#installation) for installation instructions.
## Documentation
Detailed documentation can be found on
[staticcheck.io](https://staticcheck.io/docs/).

View File

@@ -0,0 +1,45 @@
// staticcheck analyses Go code and makes it better.
package main
import (
"log"
"os"
"honnef.co/go/tools/lintcmd"
"honnef.co/go/tools/lintcmd/version"
"honnef.co/go/tools/quickfix"
"honnef.co/go/tools/simple"
"honnef.co/go/tools/staticcheck"
"honnef.co/go/tools/stylecheck"
"honnef.co/go/tools/unused"
)
// main wires up the staticcheck command: it registers debug flags,
// adds all analyzer suites (quickfix only when requested), optionally
// redirects unused's object graph to a file, and runs the linter.
func main() {
	cmd := lintcmd.NewCommand("staticcheck")
	cmd.SetVersion(version.Version, version.MachineVersion)

	fs := cmd.FlagSet()
	unusedGraph := fs.String("debug.unused-graph", "", "Write unused's object graph to `file`")
	runQuickfix := fs.Bool("debug.run-quickfix-analyzers", false, "Run quickfix analyzers")
	cmd.ParseFlags(os.Args[1:])

	cmd.AddAnalyzers(simple.Analyzers...)
	cmd.AddAnalyzers(staticcheck.Analyzers...)
	cmd.AddAnalyzers(stylecheck.Analyzers...)
	cmd.AddAnalyzers(unused.Analyzer)
	if *runQuickfix {
		cmd.AddAnalyzers(quickfix.Analyzers...)
	}

	if *unusedGraph != "" {
		f, err := os.OpenFile(*unusedGraph, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
		if err != nil {
			log.Fatal(err)
		}
		unused.Debug = f
	}

	cmd.Run()
}

View File

@@ -0,0 +1,266 @@
package config
import (
"bytes"
"fmt"
"go/ast"
"go/token"
"os"
"path/filepath"
"reflect"
"strings"
"github.com/BurntSushi/toml"
"golang.org/x/tools/go/analysis"
)
// Dir looks at a list of absolute file names, which should make up a
// single package, and returns the path of the directory that may
// contain a staticcheck.conf file. It returns the empty string if no
// such directory could be determined, for example because all files
// were located in Go's build cache.
func Dir(files []string) string {
	if len(files) == 0 {
		return ""
	}
	cache, err := os.UserCacheDir()
	if err != nil {
		cache = ""
	}
	for _, file := range files {
		// FIXME(dh): using strings.HasPrefix isn't technically
		// correct, but it should be good enough for now.
		if cache != "" && strings.HasPrefix(file, cache) {
			// File in the build cache of the standard Go build system
			continue
		}
		return filepath.Dir(file)
	}
	// The package only consists of generated files.
	return ""
}

// dirAST is like Dir, but operates on parsed files, recovering their
// (line-directive adjusted) file names from fset.
func dirAST(files []*ast.File, fset *token.FileSet) string {
	names := make([]string, len(files))
	for i, f := range files {
		names[i] = fset.PositionFor(f.Pos(), true).Filename
	}
	return Dir(names)
}
// Analyzer loads the configuration that applies to the package
// currently being analyzed, falling back to DefaultConfig when no
// configuration directory can be determined.
var Analyzer = &analysis.Analyzer{
	Name: "config",
	Doc:  "loads configuration for the current package tree",
	Run: func(pass *analysis.Pass) (interface{}, error) {
		dir := dirAST(pass.Files, pass.Fset)
		if dir == "" {
			cfg := DefaultConfig
			return &cfg, nil
		}
		cfg, err := Load(dir)
		if err != nil {
			// Wrap with %w (instead of %s) so callers can still
			// inspect the underlying error — e.g. a ParseError — via
			// errors.As/errors.Is.
			return nil, fmt.Errorf("error loading staticcheck.conf: %w", err)
		}
		return &cfg, nil
	},
	RunDespiteErrors: true,
	ResultType:       reflect.TypeOf((*Config)(nil)),
}
// For returns the configuration computed by Analyzer for the package
// being analyzed in pass.
func For(pass *analysis.Pass) *Config {
	return pass.ResultOf[Analyzer].(*Config)
}
// mergeLists combines a parent list a with a child list b. Every
// "inherit" entry in b is expanded to the full contents of a; all
// other entries are kept as-is.
func mergeLists(a, b []string) []string {
	merged := make([]string, 0, len(a)+len(b))
	for _, entry := range b {
		if entry != "inherit" {
			merged = append(merged, entry)
			continue
		}
		merged = append(merged, a...)
	}
	return merged
}
// normalizeList collapses runs of identical consecutive entries into
// a single entry. It panics if the list still contains the "inherit"
// marker, which must have been resolved by merging beforehand.
func normalizeList(list []string) []string {
	if len(list) > 1 {
		deduped := make([]string, 0, len(list))
		deduped = append(deduped, list[0])
		for i := 1; i < len(list); i++ {
			if list[i] != list[i-1] {
				deduped = append(deduped, list[i])
			}
		}
		list = deduped
	}
	for _, entry := range list {
		if entry == "inherit" {
			// This should never happen, because the default config
			// should not use "inherit"
			panic(`unresolved "inherit"`)
		}
	}
	return list
}
// Merge returns cfg overlaid with ocfg: every list that is set in
// ocfg replaces — or, via "inherit" entries, extends — the
// corresponding list in cfg. Unset (nil) lists in ocfg leave cfg's
// value untouched.
func (cfg Config) Merge(ocfg Config) Config {
	overlay := func(base, child []string) []string {
		if child == nil {
			return base
		}
		return mergeLists(base, child)
	}
	cfg.Checks = overlay(cfg.Checks, ocfg.Checks)
	cfg.Initialisms = overlay(cfg.Initialisms, ocfg.Initialisms)
	cfg.DotImportWhitelist = overlay(cfg.DotImportWhitelist, ocfg.DotImportWhitelist)
	cfg.HTTPStatusCodeWhitelist = overlay(cfg.HTTPStatusCodeWhitelist, ocfg.HTTPStatusCodeWhitelist)
	return cfg
}
type Config struct {
	// TODO(dh): this implementation makes it impossible for external
	// clients to add their own checkers with configuration. At the
	// moment, we don't really care about that; we don't encourage
	// that people use this package. In the future, we may. The
	// obvious solution would be using map[string]interface{}, but
	// that's obviously subpar.

	Checks                  []string `toml:"checks"`
	Initialisms             []string `toml:"initialisms"`
	DotImportWhitelist      []string `toml:"dot_import_whitelist"`
	HTTPStatusCodeWhitelist []string `toml:"http_status_code_whitelist"`
}

// String renders the configuration for debugging purposes.
func (c Config) String() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "Checks: %#v\n", c.Checks)
	fmt.Fprintf(&sb, "Initialisms: %#v\n", c.Initialisms)
	fmt.Fprintf(&sb, "DotImportWhitelist: %#v\n", c.DotImportWhitelist)
	fmt.Fprintf(&sb, "HTTPStatusCodeWhitelist: %#v", c.HTTPStatusCodeWhitelist)
	return sb.String()
}
// DefaultConfig is the default configuration.
// Its initial value describes the majority of the default configuration,
// but the Checks field can be updated at runtime based on the analyzers being used, to disable non-default checks.
// For cmd/staticcheck, this is handled by (*lintcmd.Command).Run.
//
// Note that DefaultConfig shouldn't be modified while analyzers are executing.
var DefaultConfig = Config{
	// All checks enabled; lintcmd narrows this to exclude
	// non-default checks at runtime (see the comment above).
	Checks: []string{"all"},
	Initialisms: []string{
		"ACL", "API", "ASCII", "CPU", "CSS", "DNS",
		"EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID",
		"IP", "JSON", "QPS", "RAM", "RPC", "SLA",
		"SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
		"UDP", "UI", "GID", "UID", "UUID", "URI",
		"URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
		"XSS", "SIP", "RTP", "AMQP", "DB", "TS",
	},
	DotImportWhitelist: []string{
		"github.com/mmcloughlin/avo/build",
		"github.com/mmcloughlin/avo/operand",
		"github.com/mmcloughlin/avo/reg",
	},
	HTTPStatusCodeWhitelist: []string{"200", "400", "404", "500"},
}
// ConfigName is the file name of staticcheck's configuration files.
const ConfigName = "staticcheck.conf"

// ParseError is a configuration parse error annotated with the file
// it occurred in.
type ParseError struct {
	// Filename is the file that contained the parse error.
	Filename string
	toml.ParseError
}
// parseConfigs collects the staticcheck.conf files that apply to dir,
// walking from dir upward to the filesystem root. The returned slice
// is ordered outermost-first: DefaultConfig, then each directory's
// configuration from the root down to dir, ready for merging.
func parseConfigs(dir string) ([]Config, error) {
	var out []Config

	// TODO(dh): consider stopping at the GOPATH/module boundary
	for dir != "" {
		f, err := os.Open(filepath.Join(dir, ConfigName))
		if os.IsNotExist(err) {
			// No config here; keep walking up. Stop when Dir no
			// longer shortens the path, i.e. at the root.
			ndir := filepath.Dir(dir)
			if ndir == dir {
				break
			}
			dir = ndir
			continue
		}
		if err != nil {
			return nil, err
		}
		var cfg Config
		// NOTE(review): toml.DecodeReader is deprecated in newer
		// versions of BurntSushi/toml in favour of
		// toml.NewDecoder(f).Decode — confirm the vendored version.
		_, err = toml.DecodeReader(f, &cfg)
		f.Close()
		if err != nil {
			if err, ok := err.(toml.ParseError); ok {
				// Annotate parse errors with the file they occurred in.
				return nil, ParseError{
					Filename:   filepath.Join(dir, ConfigName),
					ParseError: err,
				}
			}
			return nil, err
		}
		out = append(out, cfg)
		ndir := filepath.Dir(dir)
		if ndir == dir {
			break
		}
		dir = ndir
	}
	out = append(out, DefaultConfig)
	if len(out) < 2 {
		return out, nil
	}
	// out was collected innermost-first; reverse it in place so that
	// merging applies outer configurations before inner ones.
	for i := 0; i < len(out)/2; i++ {
		out[i], out[len(out)-1-i] = out[len(out)-1-i], out[i]
	}
	return out, nil
}
// mergeConfigs folds a list of configurations, ordered outermost to
// innermost, into a single effective configuration.
func mergeConfigs(confs []Config) Config {
	switch len(confs) {
	case 0:
		// This shouldn't happen because we always have at least a
		// default config.
		panic("trying to merge zero configs")
	case 1:
		return confs[0]
	}
	merged := confs[0]
	for _, c := range confs[1:] {
		merged = merged.Merge(c)
	}
	return merged
}
// Load computes the effective configuration for the package rooted at
// dir by merging every staticcheck.conf on the path from dir to the
// filesystem root with the default configuration, then normalizing
// all lists.
func Load(dir string) (Config, error) {
	confs, err := parseConfigs(dir)
	if err != nil {
		return Config{}, err
	}
	conf := mergeConfigs(confs)
	for _, list := range []*[]string{
		&conf.Checks,
		&conf.Initialisms,
		&conf.DotImportWhitelist,
		&conf.HTTPStatusCodeWhitelist,
	} {
		*list = normalizeList(*list)
	}
	return conf, nil
}

View File

@@ -0,0 +1,14 @@
checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
initialisms = ["ACL", "API", "ASCII", "CPU", "CSS", "DNS",
"EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID",
"IP", "JSON", "QPS", "RAM", "RPC", "SLA",
"SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
"UDP", "UI", "GID", "UID", "UUID", "URI",
"URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
"XSS", "SIP", "RTP", "AMQP", "DB", "TS"]
dot_import_whitelist = [
"github.com/mmcloughlin/avo/build",
"github.com/mmcloughlin/avo/operand",
"github.com/mmcloughlin/avo/reg",
]
http_status_code_whitelist = ["200", "400", "404", "500"]

View File

@@ -0,0 +1,20 @@
package astutil
import (
"go/ast"
"go/token"
_ "unsafe"
"golang.org/x/tools/go/ast/astutil"
)
// Cursor is a re-export of golang.org/x/tools/go/ast/astutil.Cursor.
type Cursor = astutil.Cursor

// ApplyFunc is a re-export of golang.org/x/tools/go/ast/astutil.ApplyFunc.
type ApplyFunc = astutil.ApplyFunc

// Apply delegates to golang.org/x/tools/go/ast/astutil.Apply.
func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) {
	return astutil.Apply(root, pre, post)
}

// PathEnclosingInterval delegates to
// golang.org/x/tools/go/ast/astutil.PathEnclosingInterval.
func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
	return astutil.PathEnclosingInterval(root, start, end)
}

View File

@@ -0,0 +1,364 @@
package astutil
import (
"fmt"
"go/ast"
"go/token"
"reflect"
"strings"
"golang.org/x/exp/typeparams"
)
// IsIdent reports whether expr is an identifier with the given name.
func IsIdent(expr ast.Expr, ident string) bool {
	if id, ok := expr.(*ast.Ident); ok {
		return id.Name == ident
	}
	return false
}

// IsBlank returns whether id is the blank identifier "_".
// If id == nil, the answer is false.
func IsBlank(id ast.Expr) bool {
	if ident, ok := id.(*ast.Ident); ok {
		return ident.Name == "_"
	}
	return false
}

// IsIntLiteral reports whether expr is an integer literal with the
// given source text.
//
// Deprecated: use code.IsIntegerLiteral instead.
func IsIntLiteral(expr ast.Expr, literal string) bool {
	if lit, ok := expr.(*ast.BasicLit); ok {
		return lit.Kind == token.INT && lit.Value == literal
	}
	return false
}

// IsZero reports whether expr is the integer literal "0".
//
// Deprecated: use IsIntLiteral instead
func IsZero(expr ast.Expr) bool {
	return IsIntLiteral(expr, "0")
}
// Preamble returns the text of the comment groups that precede a
// file's package clause (and its doc comment, if any), joined by
// newlines.
func Preamble(f *ast.File) string {
	cutoff := f.Package
	if f.Doc != nil {
		cutoff = f.Doc.Pos()
	}
	var parts []string
	for _, grp := range f.Comments {
		if grp.Pos() >= cutoff {
			break
		}
		parts = append(parts, grp.Text())
	}
	return strings.Join(parts, "\n")
}
// GroupSpecs partitions specs into groups of specs that appear on
// consecutive lines, as judged by fset; a gap of one or more lines
// starts a new group. It returns nil for an empty input.
func GroupSpecs(fset *token.FileSet, specs []ast.Spec) [][]ast.Spec {
	if len(specs) == 0 {
		return nil
	}
	groups := [][]ast.Spec{{specs[0]}}
	prev := specs[0]
	for _, spec := range specs[1:] {
		prevLine := fset.PositionFor(prev.End(), false).Line
		curLine := fset.PositionFor(spec.Pos(), false).Line
		if curLine-1 != prevLine {
			groups = append(groups, nil)
		}
		groups[len(groups)-1] = append(groups[len(groups)-1], spec)
		prev = spec
	}
	return groups
}
// Unparen returns e with any enclosing parentheses stripped.
func Unparen(e ast.Expr) ast.Expr {
	for p, ok := e.(*ast.ParenExpr); ok; p, ok = e.(*ast.ParenExpr) {
		e = p.X
	}
	return e
}
// CopyExpr creates a deep copy of an expression.
// It doesn't support copying FuncLits and returns ok == false when encountering one.
//
// The copy never shares mutable state (child nodes or slices of child
// nodes) with the input, so the result can be freely modified.
func CopyExpr(node ast.Expr) (ast.Expr, bool) {
	switch node := node.(type) {
	case *ast.BasicLit:
		cp := *node
		return &cp, true
	case *ast.BinaryExpr:
		cp := *node
		var ok1, ok2 bool
		cp.X, ok1 = CopyExpr(cp.X)
		cp.Y, ok2 = CopyExpr(cp.Y)
		return &cp, ok1 && ok2
	case *ast.CallExpr:
		var ok bool
		cp := *node
		cp.Fun, ok = CopyExpr(cp.Fun)
		if !ok {
			return nil, false
		}
		// Allocate a fresh slice; the struct copy above aliases node.Args.
		cp.Args = make([]ast.Expr, len(node.Args))
		for i, v := range node.Args {
			cp.Args[i], ok = CopyExpr(v)
			if !ok {
				return nil, false
			}
		}
		return &cp, true
	case *ast.CompositeLit:
		var ok bool
		cp := *node
		cp.Type, ok = CopyExpr(cp.Type)
		if !ok {
			return nil, false
		}
		cp.Elts = make([]ast.Expr, len(node.Elts))
		for i, v := range node.Elts {
			cp.Elts[i], ok = CopyExpr(v)
			if !ok {
				return nil, false
			}
		}
		return &cp, true
	case *ast.Ident:
		cp := *node
		return &cp, true
	case *ast.IndexExpr:
		var ok1, ok2 bool
		cp := *node
		cp.X, ok1 = CopyExpr(cp.X)
		cp.Index, ok2 = CopyExpr(cp.Index)
		return &cp, ok1 && ok2
	case *typeparams.IndexListExpr:
		var ok bool
		cp := *node
		cp.X, ok = CopyExpr(cp.X)
		if !ok {
			return nil, false
		}
		// Fix: allocate a fresh Indices slice. The struct copy shares its
		// backing array with node.Indices, so writing into cp.Indices
		// without this allocation would mutate the original expression.
		cp.Indices = make([]ast.Expr, len(node.Indices))
		for i, v := range node.Indices {
			cp.Indices[i], ok = CopyExpr(v)
			if !ok {
				return nil, false
			}
		}
		return &cp, true
	case *ast.KeyValueExpr:
		var ok1, ok2 bool
		cp := *node
		cp.Key, ok1 = CopyExpr(cp.Key)
		cp.Value, ok2 = CopyExpr(cp.Value)
		return &cp, ok1 && ok2
	case *ast.ParenExpr:
		var ok bool
		cp := *node
		cp.X, ok = CopyExpr(cp.X)
		return &cp, ok
	case *ast.SelectorExpr:
		var ok bool
		cp := *node
		cp.X, ok = CopyExpr(cp.X)
		if !ok {
			return nil, false
		}
		sel, ok := CopyExpr(cp.Sel)
		if !ok {
			// this is impossible: copying an *ast.Ident always succeeds
			return nil, false
		}
		cp.Sel = sel.(*ast.Ident)
		return &cp, true
	case *ast.SliceExpr:
		var ok1, ok2, ok3, ok4 bool
		cp := *node
		cp.X, ok1 = CopyExpr(cp.X)
		cp.Low, ok2 = CopyExpr(cp.Low)
		cp.High, ok3 = CopyExpr(cp.High)
		cp.Max, ok4 = CopyExpr(cp.Max)
		return &cp, ok1 && ok2 && ok3 && ok4
	case *ast.StarExpr:
		var ok bool
		cp := *node
		cp.X, ok = CopyExpr(cp.X)
		return &cp, ok
	case *ast.TypeAssertExpr:
		var ok1, ok2 bool
		cp := *node
		cp.X, ok1 = CopyExpr(cp.X)
		cp.Type, ok2 = CopyExpr(cp.Type)
		return &cp, ok1 && ok2
	case *ast.UnaryExpr:
		var ok bool
		cp := *node
		cp.X, ok = CopyExpr(cp.X)
		return &cp, ok
	case *ast.MapType:
		var ok1, ok2 bool
		cp := *node
		cp.Key, ok1 = CopyExpr(cp.Key)
		cp.Value, ok2 = CopyExpr(cp.Value)
		return &cp, ok1 && ok2
	case *ast.ArrayType:
		var ok1, ok2 bool
		cp := *node
		cp.Len, ok1 = CopyExpr(cp.Len)
		cp.Elt, ok2 = CopyExpr(cp.Elt)
		return &cp, ok1 && ok2
	case *ast.Ellipsis:
		var ok bool
		cp := *node
		cp.Elt, ok = CopyExpr(cp.Elt)
		return &cp, ok
	case *ast.InterfaceType:
		// NOTE(review): only a shallow copy of the type; Methods is shared
		// with the original. Presumably sufficient for the callers' needs.
		cp := *node
		return &cp, true
	case *ast.StructType:
		// NOTE(review): shallow copy; Fields is shared with the original.
		cp := *node
		return &cp, true
	case *ast.FuncLit, *ast.FuncType:
		// TODO(dh): implement copying of function literals and types.
		return nil, false
	case *ast.ChanType:
		var ok bool
		cp := *node
		cp.Value, ok = CopyExpr(cp.Value)
		return &cp, ok
	case nil:
		return nil, true
	default:
		panic(fmt.Sprintf("unreachable: %T", node))
	}
}
// Equal reports whether the AST nodes a and b are structurally equal,
// ignoring positions. Two nils are equal; a nil and a non-nil node are
// not. Function literals are never considered equal.
func Equal(a, b ast.Node) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	if reflect.TypeOf(a) != reflect.TypeOf(b) {
		return false
	}

	switch a := a.(type) {
	case *ast.BasicLit:
		b := b.(*ast.BasicLit)
		return a.Kind == b.Kind && a.Value == b.Value
	case *ast.BinaryExpr:
		b := b.(*ast.BinaryExpr)
		return Equal(a.X, b.X) && a.Op == b.Op && Equal(a.Y, b.Y)
	case *ast.CallExpr:
		b := b.(*ast.CallExpr)
		if len(a.Args) != len(b.Args) {
			return false
		}
		for i, arg := range a.Args {
			if !Equal(arg, b.Args[i]) {
				return false
			}
		}
		// Only the presence of "..." matters, not its position.
		return Equal(a.Fun, b.Fun) &&
			(a.Ellipsis == token.NoPos && b.Ellipsis == token.NoPos || a.Ellipsis != token.NoPos && b.Ellipsis != token.NoPos)
	case *ast.CompositeLit:
		b := b.(*ast.CompositeLit)
		if len(a.Elts) != len(b.Elts) {
			return false
		}
		// Fix: iterate over a.Elts and compare to b.Elts. Previously this
		// ranged over b.Elts and compared b's elements to themselves,
		// which made the element comparison always succeed.
		for i, elt := range a.Elts {
			if !Equal(elt, b.Elts[i]) {
				return false
			}
		}
		return Equal(a.Type, b.Type) && a.Incomplete == b.Incomplete
	case *ast.Ident:
		b := b.(*ast.Ident)
		return a.Name == b.Name
	case *ast.IndexExpr:
		b := b.(*ast.IndexExpr)
		return Equal(a.X, b.X) && Equal(a.Index, b.Index)
	case *typeparams.IndexListExpr:
		b := b.(*typeparams.IndexListExpr)
		if len(a.Indices) != len(b.Indices) {
			return false
		}
		for i, v := range a.Indices {
			if !Equal(v, b.Indices[i]) {
				return false
			}
		}
		return Equal(a.X, b.X)
	case *ast.KeyValueExpr:
		b := b.(*ast.KeyValueExpr)
		return Equal(a.Key, b.Key) && Equal(a.Value, b.Value)
	case *ast.ParenExpr:
		b := b.(*ast.ParenExpr)
		return Equal(a.X, b.X)
	case *ast.SelectorExpr:
		b := b.(*ast.SelectorExpr)
		return Equal(a.X, b.X) && Equal(a.Sel, b.Sel)
	case *ast.SliceExpr:
		b := b.(*ast.SliceExpr)
		return Equal(a.X, b.X) && Equal(a.Low, b.Low) && Equal(a.High, b.High) && Equal(a.Max, b.Max) && a.Slice3 == b.Slice3
	case *ast.StarExpr:
		b := b.(*ast.StarExpr)
		return Equal(a.X, b.X)
	case *ast.TypeAssertExpr:
		b := b.(*ast.TypeAssertExpr)
		return Equal(a.X, b.X) && Equal(a.Type, b.Type)
	case *ast.UnaryExpr:
		b := b.(*ast.UnaryExpr)
		return a.Op == b.Op && Equal(a.X, b.X)
	case *ast.MapType:
		b := b.(*ast.MapType)
		return Equal(a.Key, b.Key) && Equal(a.Value, b.Value)
	case *ast.ArrayType:
		b := b.(*ast.ArrayType)
		return Equal(a.Len, b.Len) && Equal(a.Elt, b.Elt)
	case *ast.Ellipsis:
		b := b.(*ast.Ellipsis)
		return Equal(a.Elt, b.Elt)
	case *ast.InterfaceType:
		b := b.(*ast.InterfaceType)
		return a.Incomplete == b.Incomplete && Equal(a.Methods, b.Methods)
	case *ast.StructType:
		b := b.(*ast.StructType)
		return a.Incomplete == b.Incomplete && Equal(a.Fields, b.Fields)
	case *ast.FuncLit:
		// TODO(dh): support function literals
		return false
	case *ast.ChanType:
		b := b.(*ast.ChanType)
		// Only the presence of the arrow matters, not its position.
		return a.Dir == b.Dir && (a.Arrow == token.NoPos && b.Arrow == token.NoPos || a.Arrow != token.NoPos && b.Arrow != token.NoPos)
	case *ast.FieldList:
		b := b.(*ast.FieldList)
		if len(a.List) != len(b.List) {
			return false
		}
		for i, fieldA := range a.List {
			if !Equal(fieldA, b.List[i]) {
				return false
			}
		}
		return true
	case *ast.Field:
		b := b.(*ast.Field)
		if len(a.Names) != len(b.Names) {
			return false
		}
		for j, name := range a.Names {
			if !Equal(name, b.Names[j]) {
				return false
			}
		}
		if !Equal(a.Type, b.Type) || !Equal(a.Tag, b.Tag) {
			return false
		}
		return true
	default:
		panic(fmt.Sprintf("unreachable: %T", a))
	}
}

View File

@@ -0,0 +1,5 @@
This package extracts buildid.go and note.go from cmd/internal/buildid/.
We have modified it to remove support for AIX big archive files, to cut down on our dependencies.
The last upstream commit we've looked at was: 639acdc833bfd12b7edd43092d1b380d70cb2874

View File

@@ -0,0 +1,238 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package buildid
import (
"bytes"
"debug/elf"
"errors"
"fmt"
"io"
"os"
"strconv"
"strings"
)
var errBuildIDMalformed = fmt.Errorf("malformed object file")
var (
bangArch = []byte("!<arch>")
pkgdef = []byte("__.PKGDEF")
goobject = []byte("go object ")
buildid = []byte("build id ")
)
// ReadFile reads the build ID from an archive or executable file.
// It dispatches on the first eight bytes: non-archive files are handed
// to readBinary, Go archives are parsed inline below, and anything that
// does not match the expected Go archive layout falls back to
// readGccgoArchive. AIX big archives ("<bigaf>\n") are unsupported.
func ReadFile(name string) (id string, err error) {
	f, err := os.Open(name)
	if err != nil {
		return "", err
	}
	defer f.Close()

	buf := make([]byte, 8)
	if _, err := f.ReadAt(buf, 0); err != nil {
		return "", err
	}
	if string(buf) != "!<arch>\n" {
		if string(buf) == "<bigaf>\n" {
			// AIX big archive; support was removed from this vendored copy.
			return "", errors.New("unsupported")
		}
		return readBinary(name, f)
	}

	// Read just enough of the target to fetch the build ID.
	// The archive is expected to look like:
	//
	//	!<arch>
	//	__.PKGDEF       0           0     0     644     7955      `
	//	go object darwin amd64 devel X:none
	//	build id "b41e5c45250e25c9fd5e9f9a1de7857ea0d41224"
	//
	// The variable-sized strings are GOOS, GOARCH, and the experiment list (X:none).
	// Reading the first 1024 bytes should be plenty.
	data := make([]byte, 1024)
	n, err := io.ReadFull(f, data)
	if err != nil && n == 0 {
		// A short read is fine as long as we got some bytes; only a
		// completely empty read is an error.
		return "", err
	}

	tryGccgo := func() (string, error) {
		return readGccgoArchive(name, f)
	}

	// Archive header.
	for i := 0; ; i++ { // returns during i==3
		j := bytes.IndexByte(data, '\n')
		if j < 0 {
			return tryGccgo()
		}
		line := data[:j]
		data = data[j+1:]
		switch i {
		case 0:
			if !bytes.Equal(line, bangArch) {
				return tryGccgo()
			}
		case 1:
			if !bytes.HasPrefix(line, pkgdef) {
				return tryGccgo()
			}
		case 2:
			if !bytes.HasPrefix(line, goobject) {
				return tryGccgo()
			}
		case 3:
			if !bytes.HasPrefix(line, buildid) {
				// Found the object header, just doesn't have a build id line.
				// Treat as successful, with empty build id.
				return "", nil
			}
			id, err := strconv.Unquote(string(line[len(buildid):]))
			if err != nil {
				return tryGccgo()
			}
			return id, nil
		}
	}
}
// readGccgoArchive tries to parse the archive as a standard Unix
// archive file, and fetch the build ID from the _buildid.o entry.
// The _buildid.o entry is written by (*Builder).gccgoBuildIDELFFile
// in cmd/go/internal/work/exec.go.
//
// Returns ("", nil) when the archive has no _buildid.o entry, and a
// *os.PathError wrapping errBuildIDMalformed on parse failures.
func readGccgoArchive(name string, f *os.File) (string, error) {
	bad := func() (string, error) {
		return "", &os.PathError{Op: "parse", Path: name, Err: errBuildIDMalformed}
	}

	// Skip the 8-byte global archive magic ("!<arch>\n").
	off := int64(8)
	for {
		if _, err := f.Seek(off, io.SeekStart); err != nil {
			return "", err
		}

		// TODO(iant): Make a debug/ar package, and use it
		// here and in cmd/link.
		var hdr [60]byte
		if _, err := io.ReadFull(f, hdr[:]); err != nil {
			if err == io.EOF {
				// No more entries, no build ID.
				return "", nil
			}
			return "", err
		}
		off += 60

		// Bytes 48-57 of a Unix ar member header hold the decimal size.
		sizeStr := strings.TrimSpace(string(hdr[48:58]))
		size, err := strconv.ParseInt(sizeStr, 0, 64)
		if err != nil {
			return bad()
		}

		// Bytes 0-15 hold the member name.
		name := strings.TrimSpace(string(hdr[:16]))
		if name == "_buildid.o/" {
			sr := io.NewSectionReader(f, off, size)
			e, err := elf.NewFile(sr)
			if err != nil {
				return bad()
			}
			s := e.Section(".go.buildid")
			if s == nil {
				return bad()
			}
			data, err := s.Data()
			if err != nil {
				return bad()
			}
			return string(data), nil
		}

		off += size
		// Archive members are 2-byte aligned.
		if off&1 != 0 {
			off++
		}
	}
}
var (
goBuildPrefix = []byte("\xff Go build ID: \"")
goBuildEnd = []byte("\"\n \xff")
elfPrefix = []byte("\x7fELF")
machoPrefixes = [][]byte{
{0xfe, 0xed, 0xfa, 0xce},
{0xfe, 0xed, 0xfa, 0xcf},
{0xce, 0xfa, 0xed, 0xfe},
{0xcf, 0xfa, 0xed, 0xfe},
}
)
var readSize = 32 * 1024 // changed for testing
// readBinary reads the build ID from a binary.
//
// ELF binaries store the build ID in a proper PT_NOTE section.
//
// Other binary formats are not so flexible. For those, the linker
// stores the build ID as non-instruction bytes at the very beginning
// of the text segment, which should appear near the beginning
// of the file. This is clumsy but fairly portable. Custom locations
// can be added for other binary types as needed, like we did for ELF.
func readBinary(name string, f *os.File) (id string, err error) {
	// Read the first 32 kB of the binary file.
	// That should be enough to find the build ID.
	// In ELF files, the build ID is in the leading headers,
	// which are typically less than 4 kB, not to mention 32 kB.
	// In Mach-O files, there's no limit, so we have to parse the file.
	// On other systems, we're trying to read enough that
	// we get the beginning of the text segment in the read.
	// The offset where the text segment begins in a hello
	// world compiled for each different object format today:
	//
	//	Plan 9: 0x20
	//	Windows: 0x600
	//
	data := make([]byte, readSize)
	_, err = io.ReadFull(f, data)
	if err == io.ErrUnexpectedEOF {
		// File is shorter than readSize; the partial read is fine.
		err = nil
	}
	if err != nil {
		return "", err
	}

	// Dispatch on the magic number at the start of the file.
	if bytes.HasPrefix(data, elfPrefix) {
		return readELF(name, f, data)
	}
	for _, m := range machoPrefixes {
		if bytes.HasPrefix(data, m) {
			return readMacho(name, f, data)
		}
	}
	return readRaw(name, data)
}
// readRaw finds the raw build ID stored in text segment data.
// The ID is stored between goBuildPrefix and goBuildEnd as a quoted
// Go string literal; the slice below deliberately includes the opening
// and closing quote characters so strconv.Unquote can decode it.
func readRaw(name string, data []byte) (id string, err error) {
	i := bytes.Index(data, goBuildPrefix)
	if i < 0 {
		// Missing. Treat as successful but build ID empty.
		return "", nil
	}

	j := bytes.Index(data[i+len(goBuildPrefix):], goBuildEnd)
	if j < 0 {
		return "", &os.PathError{Op: "parse", Path: name, Err: errBuildIDMalformed}
	}

	// -1/+1 keep the surrounding double quotes in the slice.
	quoted := data[i+len(goBuildPrefix)-1 : i+len(goBuildPrefix)+j+1]
	id, err = strconv.Unquote(string(quoted))
	if err != nil {
		return "", &os.PathError{Op: "parse", Path: name, Err: errBuildIDMalformed}
	}
	return id, nil
}

View File

@@ -0,0 +1,207 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package buildid
import (
"bytes"
"debug/elf"
"debug/macho"
"encoding/binary"
"fmt"
"io"
"os"
)
func readAligned4(r io.Reader, sz int32) ([]byte, error) {
full := (sz + 3) &^ 3
data := make([]byte, full)
_, err := io.ReadFull(r, data)
if err != nil {
return nil, err
}
data = data[:sz]
return data, nil
}
// ReadELFNote returns the desc field of the first note in filename's
// SHT_NOTE sections whose name and type match name and typ. It returns
// (nil, nil) when no matching note exists.
func ReadELFNote(filename, name string, typ int32) ([]byte, error) {
	f, err := elf.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	for _, sect := range f.Sections {
		if sect.Type != elf.SHT_NOTE {
			continue
		}
		r := sect.Open()
		// A note section is a sequence of records:
		// namesize, descsize, type (4 bytes each), then the
		// 4-byte-aligned name and desc payloads.
		for {
			var namesize, descsize, noteType int32
			err = binary.Read(r, f.ByteOrder, &namesize)
			if err != nil {
				if err == io.EOF {
					// End of this note section; try the next one.
					break
				}
				return nil, fmt.Errorf("read namesize failed: %v", err)
			}
			err = binary.Read(r, f.ByteOrder, &descsize)
			if err != nil {
				return nil, fmt.Errorf("read descsize failed: %v", err)
			}
			err = binary.Read(r, f.ByteOrder, &noteType)
			if err != nil {
				return nil, fmt.Errorf("read type failed: %v", err)
			}
			noteName, err := readAligned4(r, namesize)
			if err != nil {
				return nil, fmt.Errorf("read name failed: %v", err)
			}
			desc, err := readAligned4(r, descsize)
			if err != nil {
				return nil, fmt.Errorf("read desc failed: %v", err)
			}
			if name == string(noteName) && typ == noteType {
				return desc, nil
			}
		}
	}
	return nil, nil
}
var elfGoNote = []byte("Go\x00\x00")
var elfGNUNote = []byte("GNU\x00")
// The Go build ID is stored in a note described by an ELF PT_NOTE prog
// header. The caller has already opened filename, to get f, and read
// at least 4 kB out, in data.
//
// readELF prefers a Go build ID note; if none is found it falls back to
// a GNU build ID note (what gccgo uses), and returns "" when neither
// exists.
func readELF(name string, f *os.File, data []byte) (buildid string, err error) {
	// Assume the note content is in the data, already read.
	// Rewrite the ELF header to set shnum to 0, so that we can pass
	// the data to elf.NewFile and it will decode the Prog list but not
	// try to read the section headers and the string table from disk.
	// That's a waste of I/O when all we care about is the Prog list
	// and the one ELF note.
	switch elf.Class(data[elf.EI_CLASS]) {
	case elf.ELFCLASS32:
		// Offsets 48-49 hold e_shnum in a 32-bit ELF header.
		data[48] = 0
		data[49] = 0
	case elf.ELFCLASS64:
		// Offsets 60-61 hold e_shnum in a 64-bit ELF header.
		data[60] = 0
		data[61] = 0
	}

	const elfGoBuildIDTag = 4
	const gnuBuildIDTag = 3

	ef, err := elf.NewFile(bytes.NewReader(data))
	if err != nil {
		return "", &os.PathError{Path: name, Op: "parse", Err: err}
	}
	var gnu string
	for _, p := range ef.Progs {
		if p.Type != elf.PT_NOTE || p.Filesz < 16 {
			continue
		}

		var note []byte
		if p.Off+p.Filesz < uint64(len(data)) {
			note = data[p.Off : p.Off+p.Filesz]
		} else {
			// For some linkers, such as the Solaris linker,
			// the buildid may not be found in data (which
			// likely contains the first 16kB of the file)
			// or even the first few megabytes of the file
			// due to differences in note segment placement;
			// in that case, extract the note data manually.
			_, err = f.Seek(int64(p.Off), io.SeekStart)
			if err != nil {
				return "", err
			}

			note = make([]byte, p.Filesz)
			_, err = io.ReadFull(f, note)
			if err != nil {
				return "", err
			}
		}

		filesz := p.Filesz
		off := p.Off
		// Walk the note records in this segment: 12-byte header
		// (namesize, descsize, tag), 4-byte name, then the value.
		for filesz >= 16 {
			nameSize := ef.ByteOrder.Uint32(note)
			valSize := ef.ByteOrder.Uint32(note[4:])
			tag := ef.ByteOrder.Uint32(note[8:])
			nname := note[12:16]
			if nameSize == 4 && 16+valSize <= uint32(len(note)) && tag == elfGoBuildIDTag && bytes.Equal(nname, elfGoNote) {
				return string(note[16 : 16+valSize]), nil
			}

			if nameSize == 4 && 16+valSize <= uint32(len(note)) && tag == gnuBuildIDTag && bytes.Equal(nname, elfGNUNote) {
				gnu = string(note[16 : 16+valSize])
			}

			// Advance to the next record, honoring 4-byte field
			// alignment and the segment's own alignment.
			nameSize = (nameSize + 3) &^ 3
			valSize = (valSize + 3) &^ 3
			notesz := uint64(12 + nameSize + valSize)
			if filesz <= notesz {
				break
			}
			off += notesz
			align := p.Align
			alignedOff := (off + align - 1) &^ (align - 1)
			notesz += alignedOff - off
			off = alignedOff
			filesz -= notesz
			note = note[notesz:]
		}
	}

	// If we didn't find a Go note, use a GNU note if available.
	// This is what gccgo uses.
	if gnu != "" {
		return gnu, nil
	}

	// No note. Treat as successful but build ID empty.
	return "", nil
}
// The Go build ID is stored at the beginning of the Mach-O __text segment.
// The caller has already opened filename, to get f, and read a few kB out, in data.
// Sadly, that's not guaranteed to hold the note, because there is an arbitrary amount
// of other junk placed in the file ahead of the main text.
func readMacho(name string, f *os.File, data []byte) (buildid string, err error) {
	// If the data we want has already been read, don't worry about Mach-O parsing.
	// This is both an optimization and a hedge against the Mach-O parsing failing
	// in the future due to, for example, the name of the __text section changing.
	if b, err := readRaw(name, data); b != "" && err == nil {
		return b, err
	}

	mf, err := macho.NewFile(f)
	if err != nil {
		return "", &os.PathError{Path: name, Op: "parse", Err: err}
	}

	sect := mf.Section("__text")
	if sect == nil {
		// Every binary has a __text section. Something is wrong.
		return "", &os.PathError{Path: name, Op: "parse", Err: fmt.Errorf("cannot find __text section")}
	}

	// It should be in the first few bytes, but read a lot just in case,
	// especially given our past problems on OS X with the build ID moving.
	// There shouldn't be much difference between reading 4kB and 32kB:
	// the hard part is getting to the data, not transferring it.
	n := sect.Size
	if n > uint64(readSize) {
		n = uint64(readSize)
	}
	buf := make([]byte, n)
	if _, err := f.ReadAt(buf, int64(sect.Offset)); err != nil {
		return "", err
	}
	return readRaw(name, buf)
}

28
vendor/honnef.co/go/tools/go/ir/LICENSE vendored Normal file
View File

@@ -0,0 +1,28 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright (c) 2016 Dominik Honnef. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,9 @@
This package started as a copy of golang.org/x/tools/go/ssa, imported from an unknown commit in 2016.
It has since been heavily modified to match our own needs in an IR.
The changes are too many to list here, and it is best to consider this package independent of go/ssa.
Upstream changes still get applied when they address bugs in portions of code we have inherited.
The last upstream commit we've looked at was:
915f6209478fe61eb90dbe155a8a1c58655b931f

View File

@@ -0,0 +1,209 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
// Simple block optimizations to simplify the control flow graph.
// TODO(adonovan): opt: instead of creating several "unreachable" blocks
// per function in the Builder, reuse a single one (e.g. at Blocks[1])
// to reduce garbage.
import (
"fmt"
"os"
)
// If true, perform sanity checking and show progress at each
// successive iteration of optimizeBlocks. Very verbose.
const debugBlockOpt = false
// markReachable sets gaps=-1 (borrowed as a mark bit by
// deleteUnreachableBlocks) for all blocks reachable from b.
// Note: the previous comment said "sets Index=-1", but the code has
// always marked the gaps field.
func markReachable(b *BasicBlock) {
	worklist := []*BasicBlock{b}
	for len(worklist) > 0 {
		blk := worklist[len(worklist)-1]
		worklist = worklist[:len(worklist)-1]
		blk.gaps = -1
		for _, succ := range blk.Succs {
			if succ.gaps == 0 {
				worklist = append(worklist, succ)
			}
		}
	}
}
// deleteUnreachableBlocks marks all reachable blocks of f and
// eliminates (nils) all others, including possibly cyclic subgraphs.
func deleteUnreachableBlocks(f *Function) {
	const white, black = 0, -1
	// We borrow b.gaps temporarily as the mark bit.
	for _, b := range f.Blocks {
		b.gaps = white
	}
	markReachable(f.Blocks[0])
	// In SSI form, we need the exit to be reachable for correct
	// post-dominance information. In original form, however, we
	// cannot unconditionally mark it reachable because we won't
	// be adding fake edges, and this breaks the calculation of
	// dominance information.
	markReachable(f.Exit)
	for i, b := range f.Blocks {
		if b.gaps == white {
			// Unlink this dead block from any live successors so their
			// Preds lists stay consistent.
			for _, c := range b.Succs {
				if c.gaps == black {
					c.removePred(b) // delete white->black edge
				}
			}
			if debugBlockOpt {
				fmt.Fprintln(os.Stderr, "unreachable", b)
			}
			f.Blocks[i] = nil // delete b
		}
	}
	f.removeNilBlocks()
}
// jumpThreading attempts to apply simple jump-threading to block b,
// in which a->b->c become a->c if b is just a Jump.
// The result is true if the optimization was applied.
func jumpThreading(f *Function, b *BasicBlock) bool {
	if b.Index == 0 {
		return false // don't apply to entry block
	}
	if b.Instrs == nil {
		return false
	}
	for _, pred := range b.Preds {
		switch pred.Control().(type) {
		case *ConstantSwitch:
			// don't optimize away the head blocks of switch statements
			return false
		}
	}
	if _, ok := b.Instrs[0].(*Jump); !ok {
		return false // not just a jump
	}
	c := b.Succs[0]
	if c == b {
		return false // don't apply to degenerate jump-to-self.
	}
	if c.hasPhi() {
		return false // not sound without more effort
	}
	// Redirect every predecessor of b straight to c.
	for j, a := range b.Preds {
		a.replaceSucc(b, c)

		// If a now has two edges to c, replace its degenerate If by Jump.
		if len(a.Succs) == 2 && a.Succs[0] == c && a.Succs[1] == c {
			jump := new(Jump)
			jump.setBlock(a)
			a.Instrs[len(a.Instrs)-1] = jump
			a.Succs = a.Succs[:1]
			c.removePred(b)
		} else {
			// First predecessor reuses b's slot in c.Preds; the rest
			// are appended.
			if j == 0 {
				c.replacePred(b, a)
			} else {
				c.Preds = append(c.Preds, a)
			}
		}

		if debugBlockOpt {
			fmt.Fprintln(os.Stderr, "jumpThreading", a, b, c)
		}
	}
	f.Blocks[b.Index] = nil // delete b
	return true
}
// fuseBlocks attempts to apply the block fusion optimization to block
// a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1.
// The result is true if the optimization was applied.
func fuseBlocks(f *Function, a *BasicBlock) bool {
	if len(a.Succs) != 1 {
		return false
	}
	// The exit block is kept distinct.
	if a.Succs[0] == f.Exit {
		return false
	}
	b := a.Succs[0]
	if len(b.Preds) != 1 {
		return false
	}
	if _, ok := a.Instrs[len(a.Instrs)-1].(*Panic); ok {
		// panics aren't simple jumps, they have side effects.
		return false
	}

	// Degenerate &&/|| ops may result in a straight-line CFG
	// containing φ-nodes. (Ideally we'd replace such them with
	// their sole operand but that requires Referrers, built later.)
	if b.hasPhi() {
		return false // not sound without further effort
	}

	// Eliminate jump at end of A, then copy all of B across.
	a.Instrs = append(a.Instrs[:len(a.Instrs)-1], b.Instrs...)
	for _, instr := range b.Instrs {
		instr.setBlock(a)
	}

	// A inherits B's successors
	a.Succs = append(a.succs2[:0], b.Succs...)

	// Fix up Preds links of all successors of B.
	for _, c := range b.Succs {
		c.replacePred(b, a)
	}

	if debugBlockOpt {
		fmt.Fprintln(os.Stderr, "fuseBlocks", a, b)
	}

	f.Blocks[b.Index] = nil // delete b
	return true
}
// optimizeBlocks() performs some simple block optimizations on a
// completed function: dead block elimination, block fusion, jump
// threading. Fusion and threading are iterated to a fixed point.
func optimizeBlocks(f *Function) {
	if debugBlockOpt {
		f.WriteTo(os.Stderr)
		mustSanityCheck(f, nil)
	}
	deleteUnreachableBlocks(f)

	// Loop until no further progress.
	changed := true
	for changed {
		changed = false

		if debugBlockOpt {
			f.WriteTo(os.Stderr)
			mustSanityCheck(f, nil)
		}

		for _, b := range f.Blocks {
			// f.Blocks will temporarily contain nils to indicate
			// deleted blocks; we remove them at the end.
			if b == nil {
				continue
			}

			// Fuse blocks. b->c becomes bc.
			if fuseBlocks(f, b) {
				changed = true
			}

			// a->b->c becomes a->c if b contains only a Jump.
			if jumpThreading(f, b) {
				changed = true
				continue // (b was disconnected)
			}
		}
	}
	f.removeNilBlocks()
}

2621
vendor/honnef.co/go/tools/go/ir/builder.go vendored Normal file

File diff suppressed because it is too large Load Diff

275
vendor/honnef.co/go/tools/go/ir/const.go vendored Normal file
View File

@@ -0,0 +1,275 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
// This file defines the Const SSA value type.
import (
"fmt"
"go/constant"
"go/types"
"strconv"
"strings"
"golang.org/x/exp/typeparams"
"honnef.co/go/tools/go/types/typeutil"
)
// NewConst returns a new constant of the specified value and type.
// val must be valid according to the specification of Const.Value.
func NewConst(val constant.Value, typ types.Type) *Const {
	return &Const{
		register: register{
			typ: typ,
		},
		Value: val,
	}
}
// intConst returns an 'int' constant that evaluates to i.
// (i is an int64 in case the host is narrower than the target.)
func intConst(i int64) *Const {
	return NewConst(constant.MakeInt64(i), tInt)
}
// nilConst returns a nil constant of the specified type, which may
// be any reference type, including interfaces. A nil Value encodes nil.
func nilConst(typ types.Type) *Const {
	return NewConst(nil, typ)
}
// stringConst returns a 'string' constant that evaluates to s.
func stringConst(s string) *Const {
	return NewConst(constant.MakeString(s), tString)
}
// zeroConst returns a new "zero" constant of the specified type.
// For structs and tuples it builds an AggregateConst of the fields'
// zero values; for other types it inspects the type set of t (which may
// be a type parameter) to pick between numeric/string/boolean zeros,
// nil, ArrayConst, or, when the terms are heterogeneous, GenericConst.
func zeroConst(t types.Type) Constant {
	if _, ok := t.Underlying().(*types.Interface); ok && !typeparams.IsTypeParam(t) {
		// Handle non-generic interface early to simplify following code.
		return nilConst(t)
	}

	tset := typeutil.NewTypeSet(t)

	switch typ := tset.CoreType().(type) {
	case *types.Struct:
		values := make([]Constant, typ.NumFields())
		for i := 0; i < typ.NumFields(); i++ {
			values[i] = zeroConst(typ.Field(i).Type())
		}
		return &AggregateConst{
			register: register{typ: t},
			Values:   values,
		}
	case *types.Tuple:
		values := make([]Constant, typ.Len())
		for i := 0; i < typ.Len(); i++ {
			values[i] = zeroConst(typ.At(i).Type())
		}
		return &AggregateConst{
			register: register{typ: t},
			Values:   values,
		}
	}

	// Predicates applied to every term of t's type set.
	isNillable := func(term *typeparams.Term) bool {
		switch typ := term.Type().Underlying().(type) {
		case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature, *typeutil.Iterator:
			return true
		case *types.Basic:
			switch typ.Kind() {
			case types.UnsafePointer, types.UntypedNil:
				return true
			default:
				return false
			}
		default:
			return false
		}
	}

	isInfo := func(info types.BasicInfo) func(*typeparams.Term) bool {
		return func(term *typeparams.Term) bool {
			basic, ok := term.Type().Underlying().(*types.Basic)
			if !ok {
				return false
			}
			return (basic.Info() & info) != 0
		}
	}

	isArray := func(term *typeparams.Term) bool {
		_, ok := term.Type().Underlying().(*types.Array)
		return ok
	}

	switch {
	case tset.All(isInfo(types.IsNumeric)):
		return NewConst(constant.MakeInt64(0), t)
	case tset.All(isInfo(types.IsString)):
		return NewConst(constant.MakeString(""), t)
	case tset.All(isInfo(types.IsBoolean)):
		return NewConst(constant.MakeBool(false), t)
	case tset.All(isNillable):
		return nilConst(t)
	case tset.All(isArray):
		var k ArrayConst
		k.setType(t)
		return &k
	default:
		// Mixed term kinds: fall back to an opaque generic zero.
		var k GenericConst
		k.setType(t)
		return &k
	}
}
// RelString returns a representation of the constant in the form
// "Const <type> {value}", with type names shown relative to from.
// Long string values are abbreviated to at most 20 characters.
func (c *Const) RelString(from *types.Package) string {
	var p string
	switch {
	case c.Value == nil:
		p = "nil"
	case c.Value.Kind() == constant.String:
		v := constant.StringVal(c.Value)
		const max = 20
		// TODO(adonovan): don't cut a rune in half.
		if len(v) > max {
			v = v[:max-3] + "..." // abbreviate
		}
		p = strconv.Quote(v)
	default:
		p = c.Value.String()
	}
	return fmt.Sprintf("Const <%s> {%s}", relType(c.Type(), from), p)
}
// String returns the constant's representation relative to its package.
func (c *Const) String() string {
	return c.RelString(c.Parent().pkg())
}
// RelString returns "ArrayConst <type>" with the type relative to pkg.
func (v *ArrayConst) RelString(pkg *types.Package) string {
	return fmt.Sprintf("ArrayConst <%s>", relType(v.Type(), pkg))
}
// String returns the constant's representation relative to its package.
func (v *ArrayConst) String() string {
	return v.RelString(v.Parent().pkg())
}
// RelString returns "AggregateConst <type> (v1, v2, ...)" with the type
// and element values relative to pkg. Nil elements print as "nil".
func (v *AggregateConst) RelString(pkg *types.Package) string {
	values := make([]string, len(v.Values))
	for i, v := range v.Values {
		if v != nil {
			values[i] = v.RelString(pkg)
		} else {
			values[i] = "nil"
		}
	}
	return fmt.Sprintf("AggregateConst <%s> (%s)", relType(v.Type(), pkg), strings.Join(values, ", "))
}
// RelString returns "GenericConst <type>" with the type relative to pkg.
func (v *GenericConst) RelString(pkg *types.Package) string {
	return fmt.Sprintf("GenericConst <%s>", relType(v.Type(), pkg))
}
// String returns the constant's representation relative to its package.
func (v *GenericConst) String() string {
	return v.RelString(v.Parent().pkg())
}
// String returns the constant's representation relative to its package.
func (v *AggregateConst) String() string {
	return v.RelString(v.Parent().pkg())
}
// IsNil returns true if this constant represents a typed or untyped nil value.
func (c *Const) IsNil() bool {
	return c.Value == nil
}
// Int64 returns the numeric value of this constant truncated to fit
// a signed 64-bit integer. It panics if the value is neither an
// integer nor a float constant.
func (c *Const) Int64() int64 {
	switch x := constant.ToInt(c.Value); x.Kind() {
	case constant.Int:
		if i, ok := constant.Int64Val(x); ok {
			return i
		}
		// Value does not fit in an int64.
		return 0
	case constant.Float:
		f, _ := constant.Float64Val(x)
		return int64(f)
	}
	panic(fmt.Sprintf("unexpected constant value: %T", c.Value))
}
// Uint64 returns the numeric value of this constant truncated to fit
// an unsigned 64-bit integer. It panics if the value is neither an
// integer nor a float constant.
func (c *Const) Uint64() uint64 {
	switch x := constant.ToInt(c.Value); x.Kind() {
	case constant.Int:
		if u, ok := constant.Uint64Val(x); ok {
			return u
		}
		// Value does not fit in a uint64.
		return 0
	case constant.Float:
		f, _ := constant.Float64Val(x)
		return uint64(f)
	}
	panic(fmt.Sprintf("unexpected constant value: %T", c.Value))
}
// Float64 returns the numeric value of this constant truncated to fit
// a float64.
func (c *Const) Float64() float64 {
	f, _ := constant.Float64Val(c.Value)
	return f
}
// Complex128 returns the complex value of this constant truncated to
// fit a complex128.
func (c *Const) Complex128() complex128 {
	re, _ := constant.Float64Val(constant.Real(c.Value))
	im, _ := constant.Float64Val(constant.Imag(c.Value))
	return complex(re, im)
}
// equal reports whether o is a *Const with identical type and value.
func (c *Const) equal(o Constant) bool {
	// TODO(dh): don't use == for types, this will miss identical pointer types, among others
	oc, ok := o.(*Const)
	if !ok {
		return false
	}
	return c.typ == oc.typ && c.Value == oc.Value
}
// equal reports whether o is an *AggregateConst of identical type.
// NOTE(review): Values are deliberately not compared here — presumably
// the type fully determines the aggregate zero value; confirm against
// callers before relying on this for non-zero aggregates.
func (c *AggregateConst) equal(o Constant) bool {
	oc, ok := o.(*AggregateConst)
	if !ok {
		return false
	}
	// TODO(dh): don't use == for types, this will miss identical pointer types, among others
	return c.typ == oc.typ
}
// equal reports whether o is an *ArrayConst of identical type.
func (c *ArrayConst) equal(o Constant) bool {
	oc, ok := o.(*ArrayConst)
	if !ok {
		return false
	}
	// TODO(dh): don't use == for types, this will miss identical pointer types, among others
	return c.typ == oc.typ
}
// equal reports whether o is a *GenericConst of identical type.
func (c *GenericConst) equal(o Constant) bool {
	oc, ok := o.(*GenericConst)
	if !ok {
		return false
	}
	// TODO(dh): don't use == for types, this will miss identical pointer types, among others
	return c.typ == oc.typ
}

View File

@@ -0,0 +1,288 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
// This file implements the CREATE phase of IR construction.
// See builder.go for explanation.
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"os"
"sync"
"honnef.co/go/tools/go/types/typeutil"
)
// measured on the standard library and rounded up to powers of two,
// on average there are 8 blocks and 16 instructions per block in a
// function.
const avgBlocks = 8
const avgInstructionsPerBlock = 16
// NewProgram returns a new IR Program.
//
// mode controls diagnostics and checking during IR construction.
func NewProgram(fset *token.FileSet, mode BuilderMode) *Program {
	prog := &Program{
		Fset:     fset,
		imported: make(map[string]*Package),
		packages: make(map[*types.Package]*Package),
		thunks:   make(map[selectionKey]*Function),
		bounds:   make(map[*types.Func]*Function),
		mode:     mode,
	}

	// Share one type hasher between the canonical-type and method-set
	// maps.
	h := typeutil.MakeHasher() // protected by methodsMu, in effect
	prog.methodSets.SetHasher(h)
	prog.canon.SetHasher(h)

	return prog
}
// memberFromObject populates package pkg with a member for the
// typechecker object obj.
//
// For objects from Go source code, syntax is the associated syntax
// tree (for funcs and vars only); it will be used during the build
// phase.
//
// Panics on object kinds it cannot represent (builtins outside
// package unsafe, and *types.Package).
//
func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) {
	name := obj.Name()
	switch obj := obj.(type) {
	case *types.Builtin:
		// Builtins have no IR representation; only package unsafe
		// legitimately declares them.
		if pkg.Pkg != types.Unsafe {
			panic("unexpected builtin object: " + obj.String())
		}
	case *types.TypeName:
		pkg.Members[name] = &Type{
			object: obj,
			pkg:    pkg,
		}
	case *types.Const:
		c := &NamedConst{
			object: obj,
			Value:  NewConst(obj.Val(), obj.Type()),
			pkg:    pkg,
		}
		pkg.values[obj] = c.Value
		pkg.Members[name] = c
	case *types.Var:
		// A package-level var is modeled as the address of a global,
		// hence the pointer type.
		g := &Global{
			Pkg:    pkg,
			name:   name,
			object: obj,
			typ:    types.NewPointer(obj.Type()), // address
		}
		pkg.values[obj] = g
		pkg.Members[name] = g
	case *types.Func:
		sig := obj.Type().(*types.Signature)
		// Multiple top-level init functions are legal Go; give each a
		// unique synthetic name (init#1, init#2, ...).
		if sig.Recv() == nil && name == "init" {
			pkg.ninit++
			name = fmt.Sprintf("init#%d", pkg.ninit)
		}
		fn := &Function{
			name:      name,
			object:    obj,
			Signature: sig,
			Pkg:       pkg,
			Prog:      pkg.Prog,
		}

		fn.source = syntax
		fn.initHTML(pkg.printFunc)
		if syntax == nil {
			fn.Synthetic = SyntheticLoadedFromExportData
		} else {
			// Note: we initialize fn.Blocks in
			// (*builder).buildFunction and not here because Blocks
			// being nil is used to indicate that building of the
			// function hasn't started yet.

			fn.functionBody = &functionBody{
				scratchInstructions: make([]Instruction, avgBlocks*avgInstructionsPerBlock),
			}
		}
		pkg.values[obj] = fn
		pkg.Functions = append(pkg.Functions, fn)
		if sig.Recv() == nil {
			pkg.Members[name] = fn // package-level function; methods are reached via their type
		}

	default: // (incl. *types.Package)
		panic("unexpected Object type: " + obj.String())
	}
}
// membersFromDecl populates package pkg with members for each
// typechecker object (var, func, const or type) associated with the
// specified decl.
//
// Blank identifiers are skipped: they declare nothing nameable.
//
func membersFromDecl(pkg *Package, decl ast.Decl) {
	switch decl := decl.(type) {
	case *ast.GenDecl: // import, const, type or var
		switch decl.Tok {
		case token.CONST:
			for _, spec := range decl.Specs {
				for _, id := range spec.(*ast.ValueSpec).Names {
					if !isBlankIdent(id) {
						// Constants need no syntax for the build phase.
						memberFromObject(pkg, pkg.info.Defs[id], nil)
					}
				}
			}

		case token.VAR:
			for _, spec := range decl.Specs {
				for _, id := range spec.(*ast.ValueSpec).Names {
					if !isBlankIdent(id) {
						// Vars keep their spec so initializers can be
						// built later.
						memberFromObject(pkg, pkg.info.Defs[id], spec)
					}
				}
			}

		case token.TYPE:
			for _, spec := range decl.Specs {
				id := spec.(*ast.TypeSpec).Name
				if !isBlankIdent(id) {
					memberFromObject(pkg, pkg.info.Defs[id], nil)
				}
			}
		}

	case *ast.FuncDecl:
		id := decl.Name
		if !isBlankIdent(id) {
			// Functions keep their decl so bodies can be built later.
			memberFromObject(pkg, pkg.info.Defs[id], decl)
		}
	}
}
// CreatePackage constructs and returns an IR Package from the
// specified type-checked, error-free file ASTs, and populates its
// Members mapping.
//
// importable determines whether this package should be returned by a
// subsequent call to ImportedPackage(pkg.Path()).
//
// The real work of building IR form for each function is not done
// until a subsequent call to Package.Build().
//
func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package {
	p := &Package{
		Prog:      prog,
		Members:   make(map[string]Member),
		values:    make(map[types.Object]Value),
		Pkg:       pkg,
		info:      info,  // transient (CREATE and BUILD phases)
		files:     files, // transient (CREATE and BUILD phases)
		printFunc: prog.PrintFunc,
	}

	// Add init() function: the synthetic package initializer that will
	// eventually run var initializers and user init funcs.
	p.init = &Function{
		name:         "init",
		Signature:    new(types.Signature),
		Synthetic:    SyntheticPackageInitializer,
		Pkg:          p,
		Prog:         prog,
		functionBody: new(functionBody),
	}
	p.init.initHTML(prog.PrintFunc)
	p.Members[p.init.name] = p.init
	p.Functions = append(p.Functions, p.init)

	// CREATE phase.
	// Allocate all package members: vars, funcs, consts and types.
	if len(files) > 0 {
		// Go source package.
		for _, file := range files {
			for _, decl := range file.Decls {
				membersFromDecl(p, decl)
			}
		}
	} else {
		// GC-compiled binary package (or "unsafe")
		// No code.
		// No position information.
		// Members come from the package scope; methods of named types
		// are added explicitly since they are not scope members.
		scope := p.Pkg.Scope()
		for _, name := range scope.Names() {
			obj := scope.Lookup(name)
			memberFromObject(p, obj, nil)
			if obj, ok := obj.(*types.TypeName); ok {
				if named, ok := obj.Type().(*types.Named); ok {
					for i, n := 0, named.NumMethods(); i < n; i++ {
						memberFromObject(p, named.Method(i), nil)
					}
				}
			}
		}
	}

	// Add initializer guard variable: ensures init() runs at most once.
	initguard := &Global{
		Pkg:  p,
		name: "init$guard",
		typ:  types.NewPointer(tBool),
	}
	p.Members[initguard.Name()] = initguard

	if prog.mode&GlobalDebug != 0 {
		p.SetDebugMode(true)
	}

	if prog.mode&PrintPackages != 0 {
		printMu.Lock()
		p.WriteTo(os.Stdout)
		printMu.Unlock()
	}

	if importable {
		prog.imported[p.Pkg.Path()] = p
	}
	prog.packages[p.Pkg] = p

	return p
}
// printMu serializes printing of Packages/Functions to stdout so that
// concurrent CreatePackage calls with PrintPackages enabled do not
// interleave output.
var printMu sync.Mutex
// AllPackages returns a new slice containing all packages in the
// program prog in unspecified order.
//
func (prog *Program) AllPackages() []*Package {
	all := make([]*Package, 0, len(prog.packages))
	for _, p := range prog.packages {
		all = append(all, p)
	}
	return all
}
// ImportedPackage returns the importable Package whose PkgPath
// is path, or nil if no such Package has been created.
//
// A parameter to CreatePackage determines whether a package should be
// considered importable. For example, no import declaration can resolve
// to the ad-hoc main package created by 'go build foo.go'.
//
// TODO(adonovan): rethink this function and the "importable" concept;
// most packages are importable. This function assumes that all
// types.Package.Path values are unique within the ir.Program, which is
// false---yet this function remains very convenient.
// Clients should use (*Program).Package instead where possible.
// IR doesn't really need a string-keyed map of packages.
//
// Note: this is a plain map lookup; if two importable packages share a
// path, the last one created wins.
//
func (prog *Program) ImportedPackage(path string) *Package {
	return prog.imported[path]
}

130
vendor/honnef.co/go/tools/go/ir/doc.go vendored Normal file
View File

@@ -0,0 +1,130 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ir defines a representation of the elements of Go programs
// (packages, types, functions, variables and constants) using a
// static single-information (SSI) form intermediate representation
// (IR) for the bodies of functions.
//
// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE.
//
// For an introduction to SSA form, upon which SSI builds, see
// http://en.wikipedia.org/wiki/Static_single_assignment_form.
// This page provides a broader reading list:
// http://www.dcs.gla.ac.uk/~jsinger/ssa.html.
//
// For an introduction to SSI form, see The static single information
// form by C. Scott Ananian.
//
// The level of abstraction of the IR form is intentionally close to
// the source language to facilitate construction of source analysis
// tools. It is not intended for machine code generation.
//
// The simplest way to create the IR of a package is
// to load typed syntax trees using golang.org/x/tools/go/packages, then
// invoke the irutil.Packages helper function. See ExampleLoadPackages
// and ExampleWholeProgram for examples.
// The resulting ir.Program contains all the packages and their
// members, but IR code is not created for function bodies until a
// subsequent call to (*Package).Build or (*Program).Build.
//
// The builder initially builds a naive IR form in which all local
// variables are addresses of stack locations with explicit loads and
// stores. Registerization of eligible locals and φ-node insertion
// using dominance and dataflow are then performed as a second pass
// called "lifting" to improve the accuracy and performance of
// subsequent analyses; this pass can be skipped by setting the
// NaiveForm builder flag.
//
// The primary interfaces of this package are:
//
// - Member: a named member of a Go package.
// - Value: an expression that yields a value.
// - Instruction: a statement that consumes values and performs computation.
// - Node: a Value or Instruction (emphasizing its membership in the IR value graph)
//
// A computation that yields a result implements both the Value and
// Instruction interfaces. The following table shows for each
// concrete type which of these interfaces it implements.
//
// Value? Instruction? Member?
// *Alloc ✔ ✔
// *BinOp ✔ ✔
// *BlankStore ✔
// *Builtin ✔
// *Call ✔ ✔
// *ChangeInterface ✔ ✔
// *ChangeType ✔ ✔
// *Const ✔ ✔
// *Convert ✔ ✔
// *DebugRef ✔
// *Defer ✔ ✔
// *Extract ✔ ✔
// *Field ✔ ✔
// *FieldAddr ✔ ✔
// *FreeVar ✔
// *Function ✔ ✔ (func)
// *Global ✔ ✔ (var)
// *Go ✔ ✔
// *If ✔
// *Index ✔ ✔
// *IndexAddr ✔ ✔
// *Jump ✔
// *Load ✔ ✔
// *MakeChan ✔ ✔
// *MakeClosure ✔ ✔
// *MakeInterface ✔ ✔
// *MakeMap ✔ ✔
// *MakeSlice ✔ ✔
// *MapLookup ✔ ✔
// *MapUpdate ✔ ✔
// *NamedConst ✔ (const)
// *Next ✔ ✔
// *Panic ✔
// *Parameter ✔ ✔
// *Phi ✔ ✔
// *Range ✔ ✔
// *Recv ✔ ✔
// *Return ✔
// *RunDefers ✔
// *Select ✔ ✔
// *Send ✔ ✔
// *Sigma ✔ ✔
// *Slice ✔ ✔
// *SliceToArrayPointer ✔ ✔
// *Store ✔ ✔
// *StringLookup ✔ ✔
// *Type ✔ (type)
// *TypeAssert ✔ ✔
// *UnOp ✔ ✔
// *Unreachable ✔
//
// Other key types in this package include: Program, Package, Function
// and BasicBlock.
//
// The program representation constructed by this package is fully
// resolved internally, i.e. it does not rely on the names of Values,
// Packages, Functions, Types or BasicBlocks for the correct
// interpretation of the program. Only the identities of objects and
// the topology of the IR and type graphs are semantically
// significant. (There is one exception: Ids, used to identify field
// and method names, contain strings.) Avoidance of name-based
// operations simplifies the implementation of subsequent passes and
// can make them very efficient. Many objects are nonetheless named
// to aid in debugging, but it is not essential that the names be
// either accurate or unambiguous. The public API exposes a number of
// name-based maps for client convenience.
//
// The ir/irutil package provides various utilities that depend only
// on the public API of this package.
//
// TODO(adonovan): Consider the exceptional control-flow implications
// of defer and recover().
//
// TODO(adonovan): write a how-to document for all the various cases
// of trying to determine corresponding elements across the four
// domains of source locations, ast.Nodes, types.Objects,
// ir.Values/Instructions.
//
package ir

469
vendor/honnef.co/go/tools/go/ir/dom.go vendored Normal file
View File

@@ -0,0 +1,469 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
// This file defines algorithms related to dominance.
// Dominator tree construction ----------------------------------------
//
// We use the algorithm described in Lengauer & Tarjan. 1979. A fast
// algorithm for finding dominators in a flowgraph.
// http://doi.acm.org/10.1145/357062.357071
//
// We also apply the optimizations to SLT described in Georgiadis et
// al, Finding Dominators in Practice, JGAA 2006,
// http://jgaa.info/accepted/2006/GeorgiadisTarjanWerneck2006.10.1.pdf
// to avoid the need for buckets of size > 1.
import (
"bytes"
"fmt"
"io"
"math/big"
"os"
"sort"
)
// Idom returns the block that immediately dominates b:
// its parent in the dominator tree, if any.
// The entry node (b.Index==0) does not have a parent,
// so Idom returns nil for it.
//
func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom }
// Dominees returns the list of blocks that b immediately dominates:
// its children in the dominator tree. The returned slice is shared,
// not a copy.
//
func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children }
// Dominates reports whether b dominates c, answered in constant time
// via the pre/post numbering of the dominator tree: b dominates c iff
// c's interval is nested within b's.
func (b *BasicBlock) Dominates(c *BasicBlock) bool {
	if b.dom.pre > c.dom.pre {
		return false
	}
	return c.dom.post <= b.dom.post
}
// byDomPreorder sorts basic blocks by their dominator-tree preorder
// number (see numberDomTree).
type byDomPreorder []*BasicBlock

func (a byDomPreorder) Len() int           { return len(a) }
func (a byDomPreorder) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre }
// DomPreorder returns a new slice containing the blocks of f in
// dominator tree preorder.
//
func (f *Function) DomPreorder() []*BasicBlock {
	blocks := make(byDomPreorder, len(f.Blocks))
	copy(blocks, f.Blocks)
	sort.Sort(blocks)
	return blocks
}
// domInfo contains a BasicBlock's dominance information.
// The same struct serves both the dominator tree (BasicBlock.dom) and
// the post-dominator tree (BasicBlock.pdom).
type domInfo struct {
	idom     *BasicBlock   // immediate dominator (parent in domtree); nil for the root
	children []*BasicBlock // nodes immediately dominated by this one
	pre, post int32        // pre- and post-order numbering within domtree; enables O(1) Dominates queries
}
// buildDomTree computes the dominator tree of f using the LT algorithm.
// Precondition: all blocks are reachable (e.g. optimizeBlocks has been run).
//
// NOTE(review): the fixpoint loop below looks like the iterative
// intersection scheme (Cooper/Harvey/Kennedy style) rather than
// classic Lengauer-Tarjan — the comments referencing LT step numbers
// predate it; confirm against upstream history.
//
func buildDomTree(fn *Function) {
	// The step numbers refer to the original LT paper; the
	// reordering is due to Georgiadis.

	// Clear any previous domInfo.
	for _, b := range fn.Blocks {
		b.dom = domInfo{}
	}

	idoms := make([]*BasicBlock, len(fn.Blocks))

	// Compute a postorder of the CFG via DFS from the entry block,
	// treating fake exits as having an edge to fn.Exit.
	order := make([]*BasicBlock, 0, len(fn.Blocks))
	seen := fn.blockset(0)
	var dfs func(b *BasicBlock)
	dfs = func(b *BasicBlock) {
		if !seen.Add(b) {
			return
		}
		for _, succ := range b.Succs {
			dfs(succ)
		}
		if fn.fakeExits.Has(b) {
			dfs(fn.Exit)
		}
		order = append(order, b)
		b.post = len(order) - 1
	}
	dfs(fn.Blocks[0])

	// Reverse in place: order is now reverse postorder.
	for i := 0; i < len(order)/2; i++ {
		o := len(order) - i - 1
		order[i], order[o] = order[o], order[i]
	}

	// The entry block is its own idom; the fixpoint loop intersects
	// predecessor idoms until stable.
	idoms[fn.Blocks[0].Index] = fn.Blocks[0]
	changed := true
	for changed {
		changed = false
		// iterate over all nodes in reverse postorder, except for the
		// entry node
		for _, b := range order[1:] {
			var newIdom *BasicBlock
			// do intersects p's dominator chain into newIdom,
			// walking both fingers up until they meet.
			do := func(p *BasicBlock) {
				if idoms[p.Index] == nil {
					return
				}
				if newIdom == nil {
					newIdom = p
				} else {
					finger1 := p
					finger2 := newIdom
					for finger1 != finger2 {
						for finger1.post < finger2.post {
							finger1 = idoms[finger1.Index]
						}
						for finger2.post < finger1.post {
							finger2 = idoms[finger2.Index]
						}
					}
					newIdom = finger1
				}
			}
			for _, p := range b.Preds {
				do(p)
			}
			// The exit block is additionally reachable from every
			// fake exit.
			if b == fn.Exit {
				for _, p := range fn.Blocks {
					if fn.fakeExits.Has(p) {
						do(p)
					}
				}
			}

			if idoms[b.Index] != newIdom {
				idoms[b.Index] = newIdom
				changed = true
			}
		}
	}

	// Materialize the tree: set parents and child lists.
	for i, b := range idoms {
		fn.Blocks[i].dom.idom = b
		if b == nil {
			// malformed CFG
			continue
		}
		if i == b.Index {
			// The root's idom is itself; it gets no parent edge.
			continue
		}
		b.dom.children = append(b.dom.children, fn.Blocks[i])
	}

	numberDomTree(fn.Blocks[0], 0, 0)

	// printDomTreeDot(os.Stderr, fn) // debugging
	// printDomTreeText(os.Stderr, root, 0) // debugging

	if fn.Prog.mode&SanityCheckFunctions != 0 {
		sanityCheckDomTree(fn)
	}
}
// buildPostDomTree is like buildDomTree, but builds the post-dominator
// tree instead: the DFS runs backwards over predecessor edges starting
// from fn.Exit, and results are stored in b.pdom rather than b.dom.
func buildPostDomTree(fn *Function) {
	// The step numbers refer to the original LT paper; the
	// reordering is due to Georgiadis.

	// Clear any previous domInfo.
	for _, b := range fn.Blocks {
		b.pdom = domInfo{}
	}

	idoms := make([]*BasicBlock, len(fn.Blocks))

	// Postorder of the reversed CFG, rooted at the exit block.
	order := make([]*BasicBlock, 0, len(fn.Blocks))
	seen := fn.blockset(0)
	var dfs func(b *BasicBlock)
	dfs = func(b *BasicBlock) {
		if !seen.Add(b) {
			return
		}
		for _, pred := range b.Preds {
			dfs(pred)
		}
		// Fake exits behave as additional predecessors of fn.Exit.
		if b == fn.Exit {
			for _, p := range fn.Blocks {
				if fn.fakeExits.Has(p) {
					dfs(p)
				}
			}
		}
		order = append(order, b)
		b.post = len(order) - 1
	}
	dfs(fn.Exit)

	// Reverse in place: order is now reverse postorder.
	for i := 0; i < len(order)/2; i++ {
		o := len(order) - i - 1
		order[i], order[o] = order[o], order[i]
	}

	idoms[fn.Exit.Index] = fn.Exit
	changed := true
	for changed {
		changed = false
		// iterate over all nodes in reverse postorder, except for the
		// exit node
		for _, b := range order[1:] {
			var newIdom *BasicBlock
			// do intersects p's post-dominator chain into newIdom.
			do := func(p *BasicBlock) {
				if idoms[p.Index] == nil {
					return
				}
				if newIdom == nil {
					newIdom = p
				} else {
					finger1 := p
					finger2 := newIdom
					for finger1 != finger2 {
						for finger1.post < finger2.post {
							finger1 = idoms[finger1.Index]
						}
						for finger2.post < finger1.post {
							finger2 = idoms[finger2.Index]
						}
					}
					newIdom = finger1
				}
			}
			// In the reversed CFG, successors play the role of
			// predecessors.
			for _, p := range b.Succs {
				do(p)
			}
			if fn.fakeExits.Has(b) {
				do(fn.Exit)
			}

			if idoms[b.Index] != newIdom {
				idoms[b.Index] = newIdom
				changed = true
			}
		}
	}

	// Materialize the tree: set parents and child lists.
	for i, b := range idoms {
		fn.Blocks[i].pdom.idom = b
		if b == nil {
			// malformed CFG
			continue
		}
		if i == b.Index {
			continue
		}
		b.pdom.children = append(b.pdom.children, fn.Blocks[i])
	}

	numberPostDomTree(fn.Exit, 0, 0)

	// printPostDomTreeDot(os.Stderr, fn) // debugging
	// printPostDomTreeText(os.Stderr, fn.Exit, 0) // debugging

	if fn.Prog.mode&SanityCheckFunctions != 0 { // XXX
		sanityCheckDomTree(fn) // XXX
	}
}
// numberDomTree sets the pre- and post-order numbers of a depth-first
// traversal of the dominator tree rooted at v. These are used to
// answer dominance queries in constant time.
//
func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) {
	v.dom.pre = pre
	pre++
	for _, child := range v.dom.children {
		pre, post = numberDomTree(child, pre, post)
	}
	v.dom.post = post
	return pre, post + 1
}
// numberPostDomTree sets the pre- and post-order numbers of a depth-first
// traversal of the post-dominator tree rooted at v. These are used to
// answer post-dominance queries in constant time.
//
func numberPostDomTree(v *BasicBlock, pre, post int32) (int32, int32) {
	v.pdom.pre = pre
	pre++
	for _, child := range v.pdom.children {
		pre, post = numberPostDomTree(child, pre, post)
	}
	v.pdom.post = post
	return pre, post + 1
}
// Testing utilities ----------------------------------------
// sanityCheckDomTree checks the correctness of the dominator tree
// computed by the LT algorithm by comparing against the dominance
// relation computed by a naive Kildall-style forward dataflow
// analysis (Algorithm 10.16 from the "Dragon" book).
//
// Panics (after printing discrepancies to stderr) if the two disagree.
// O(n^2) in the number of blocks; only run under SanityCheckFunctions.
//
func sanityCheckDomTree(f *Function) {
	n := len(f.Blocks)

	// D[i] is the set of blocks that dominate f.Blocks[i],
	// represented as a bit-set of block indices.
	D := make([]big.Int, n)

	one := big.NewInt(1)

	// all is the set of all blocks; constant.
	var all big.Int
	all.Set(one).Lsh(&all, uint(n)).Sub(&all, one)

	// Initialization.
	for i := range f.Blocks {
		if i == 0 {
			// A root is dominated only by itself.
			D[i].SetBit(&D[0], 0, 1)
		} else {
			// All other blocks are (initially) dominated
			// by every block.
			D[i].Set(&all)
		}
	}

	// Iteration until fixed point.
	for changed := true; changed; {
		changed = false
		for i, b := range f.Blocks {
			if i == 0 {
				continue
			}
			// Compute intersection across predecessors.
			var x big.Int
			x.Set(&all)
			for _, pred := range b.Preds {
				x.And(&x, &D[pred.Index])
			}
			// Fake exits count as extra predecessors of the exit block.
			if b == f.Exit {
				for _, p := range f.Blocks {
					if f.fakeExits.Has(p) {
						x.And(&x, &D[p.Index])
					}
				}
			}
			x.SetBit(&x, i, 1) // a block always dominates itself.
			if D[i].Cmp(&x) != 0 {
				D[i].Set(&x)
				changed = true
			}
		}
	}

	// Check the entire relation. O(n^2).
	ok := true
	for i := 0; i < n; i++ {
		for j := 0; j < n; j++ {
			b, c := f.Blocks[i], f.Blocks[j]
			actual := b.Dominates(c)
			expected := D[j].Bit(i) == 1
			if actual != expected {
				fmt.Fprintf(os.Stderr, "dominates(%s, %s)==%t, want %t\n", b, c, actual, expected)
				ok = false
			}
		}
	}

	// The preorder numbering must agree with DomPreorder's sort.
	preorder := f.DomPreorder()
	for _, b := range f.Blocks {
		if got := preorder[b.dom.pre]; got != b {
			fmt.Fprintf(os.Stderr, "preorder[%d]==%s, want %s\n", b.dom.pre, got, b)
			ok = false
		}
	}

	if !ok {
		panic("sanityCheckDomTree failed for " + f.String())
	}
}
// Printing functions ----------------------------------------
// printDomTreeText prints the dominator tree rooted at v as text,
// one block per line, using indentation to show depth.
//lint:ignore U1000 used during debugging
func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) {
	fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v)
	for _, c := range v.dom.children {
		printDomTreeText(buf, c, indent+1)
	}
}
// printDomTreeDot prints the dominator tree of f in AT&T GraphViz
// (.dot) format. Nodes are identified by their dominator-tree
// preorder number; solid edges are tree edges, dotted edges are CFG
// edges, and red dotted edges mark fake exits.
//lint:ignore U1000 used during debugging
func printDomTreeDot(buf io.Writer, f *Function) {
	fmt.Fprintln(buf, "//", f)
	fmt.Fprintln(buf, "digraph domtree {")
	for i, b := range f.Blocks {
		v := b.dom
		fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post)
		// TODO(adonovan): improve appearance of edges
		// belonging to both dominator tree and CFG.

		// Dominator tree edge.
		if i != 0 {
			fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.dom.pre, v.pre)
		}
		// CFG edges.
		for _, pred := range b.Preds {
			fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.dom.pre, v.pre)
		}

		if f.fakeExits.Has(b) {
			fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0,color=red];\n", b.dom.pre, f.Exit.dom.pre)
		}
	}
	fmt.Fprintln(buf, "}")
}
// printPostDomTreeText prints the post-dominator tree rooted at v as
// text, using indentation.
//lint:ignore U1000 used during debugging
func printPostDomTreeText(buf io.Writer, v *BasicBlock, indent int) {
	fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v)
	for _, child := range v.pdom.children {
		printPostDomTreeText(buf, child, indent+1)
	}
}
// printPostDomTreeDot prints the post-dominator tree of f in AT&T
// GraphViz (.dot) format. Nodes are identified by their
// post-dominator-tree preorder number.
//
// Bug fix: the fake-exit edge previously used b.dom.pre and
// f.Exit.dom.pre (dominator-tree numbering), referencing node ids
// that don't exist in this graph, whose nodes are all labeled by
// pdom.pre; it now uses the pdom numbering consistently.
//lint:ignore U1000 used during debugging
func printPostDomTreeDot(buf io.Writer, f *Function) {
	fmt.Fprintln(buf, "//", f)
	fmt.Fprintln(buf, "digraph pdomtree {")
	for _, b := range f.Blocks {
		v := b.pdom
		fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post)
		// TODO(adonovan): improve appearance of edges
		// belonging to both dominator tree and CFG.

		// Post-dominator tree edge (the exit block is the root).
		if b != f.Exit {
			fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.pdom.pre, v.pre)
		}
		// CFG edges.
		for _, pred := range b.Preds {
			fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.pdom.pre, v.pre)
		}

		if f.fakeExits.Has(b) {
			fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0,color=red];\n", b.pdom.pre, f.Exit.pdom.pre)
		}
	}
	fmt.Fprintln(buf, "}")
}

495
vendor/honnef.co/go/tools/go/ir/emit.go vendored Normal file
View File

@@ -0,0 +1,495 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
// Helpers for emitting IR instructions.
import (
"fmt"
"go/ast"
"go/constant"
"go/token"
"go/types"
"honnef.co/go/tools/go/types/typeutil"
"golang.org/x/exp/typeparams"
)
// emitNew emits to f a new (heap Alloc) instruction allocating an
// object of type typ. source is the optional associated syntax node.
//
func emitNew(f *Function, typ types.Type, source ast.Node) *Alloc {
	alloc := &Alloc{Heap: true}
	alloc.setType(types.NewPointer(typ))
	f.emit(alloc, source)
	return alloc
}
// emitLoad emits to f an instruction to load the address addr into a
// new temporary, and returns the value so defined.
//
func emitLoad(f *Function, addr Value, source ast.Node) *Load {
	load := &Load{X: addr}
	load.setType(deref(addr.Type()))
	f.emit(load, source)
	return load
}
// emitRecv emits to f a channel receive from ch, with the comma-ok
// form when commaOk is set; typ is the result type.
func emitRecv(f *Function, ch Value, commaOk bool, typ types.Type, source ast.Node) Value {
	r := &Recv{Chan: ch, CommaOk: commaOk}
	r.setType(typ)
	return f.emit(r, source)
}
// emitDebugRef emits to f a DebugRef pseudo-instruction associating
// expression e with value v. It is a no-op when debug info is
// disabled or the expression carries none.
//
func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) {
	if ref := makeDebugRef(f, e, v, isAddr); ref != nil {
		f.emit(ref, nil)
	}
}
// makeDebugRef constructs (but does not emit) a DebugRef associating
// expression e with value v. It returns nil when debug info is
// disabled, or when e is a blank identifier or denotes an object
// (nil, constant, builtin) that needs no reference.
// Panics if v or e is nil.
func makeDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) *DebugRef {
	if !f.debugInfo() {
		return nil // debugging not enabled
	}
	if v == nil || e == nil {
		panic("nil")
	}
	var obj types.Object
	e = unparen(e)
	if id, ok := e.(*ast.Ident); ok {
		if isBlankIdent(id) {
			return nil
		}
		obj = f.Pkg.objectOf(id)
		switch obj.(type) {
		case *types.Nil, *types.Const, *types.Builtin:
			// These denote no storage location; nothing to reference.
			return nil
		}
	}
	return &DebugRef{
		X:      v,
		Expr:   e,
		IsAddr: isAddr,
		object: obj,
	}
}
// emitArith emits to f code to compute the binary operation op(x, y)
// where op is an eager shift, logical or arithmetic operation.
// (Use emitCompare() for comparisons and Builder.logicalBinop() for
// non-eager operations.)
//
// Panics on any other token. Operands are converted to t (shifts
// convert only x; an untyped shift count is converted to uint).
//
func emitArith(f *Function, op token.Token, x, y Value, t types.Type, source ast.Node) Value {
	switch op {
	case token.SHL, token.SHR:
		x = emitConv(f, x, t, source)
		// y may be signed or an 'untyped' constant.
		// There is a runtime panic if y is signed and <0. Instead of inserting a check for y<0
		// and converting to an unsigned value (like the compiler) leave y as is.
		if b, ok := y.Type().Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 {
			// Untyped conversion:
			// Spec https://go.dev/ref/spec#Operators:
			// The right operand in a shift expression must have integer type or be an untyped constant
			// representable by a value of type uint.
			y = emitConv(f, y, types.Typ[types.Uint], source)
		}

	case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT:
		x = emitConv(f, x, t, source)
		y = emitConv(f, y, t, source)

	default:
		panic("illegal op in emitArith: " + op.String())

	}
	v := &BinOp{
		Op: op,
		X:  x,
		Y:  y,
	}
	v.setType(t)
	return f.emit(v, source)
}
// emitCompare emits to f code to compute the boolean result of
// the comparison 'x op y'.
//
// Operands of unequal types are converted so the BinOp is well typed
// (interface vs. concrete, or constant vs. typed).
//
func emitCompare(f *Function, op token.Token, x, y Value, source ast.Node) Value {
	xt := x.Type().Underlying()
	yt := y.Type().Underlying()

	// Special case to optimise a tagless SwitchStmt so that
	// these are equivalent
	//   switch { case e: ...}
	//   switch true { case e: ... }
	//   if e==true { ... }
	// even in the case when e's type is an interface.
	// TODO(adonovan): opt: generalise to x==true, false!=y, etc.
	if x, ok := x.(*Const); ok && op == token.EQL && x.Value != nil && x.Value.Kind() == constant.Bool && constant.BoolVal(x.Value) {
		if yt, ok := yt.(*types.Basic); ok && yt.Info()&types.IsBoolean != 0 {
			return y
		}
	}

	if types.Identical(xt, yt) {
		// no conversion necessary
	} else if _, ok := xt.(*types.Interface); ok && !typeparams.IsTypeParam(x.Type()) {
		y = emitConv(f, y, x.Type(), source)
	} else if _, ok := yt.(*types.Interface); ok && !typeparams.IsTypeParam(y.Type()) {
		x = emitConv(f, x, y.Type(), source)
	} else if _, ok := x.(*Const); ok {
		x = emitConv(f, x, y.Type(), source)
	} else if _, ok := y.(*Const); ok {
		y = emitConv(f, y, x.Type(), source)
		//lint:ignore SA9003 no-op
	} else {
		// other cases, e.g. channels. No-op.
	}

	v := &BinOp{
		Op: op,
		X:  x,
		Y:  y,
	}
	v.setType(tBool)
	return f.emit(v, source)
}
// isValuePreserving returns true if a conversion from ut_src to
// ut_dst is value-preserving, i.e. just a change of type.
// Precondition: neither argument is a named type.
//
func isValuePreserving(ut_src, ut_dst types.Type) bool {
	// Identical underlying types?
	if types.IdenticalIgnoreTags(ut_dst, ut_src) {
		return true
	}

	switch ut_dst.(type) {
	case *types.Chan:
		// Conversion between channel types?
		_, isChan := ut_src.(*types.Chan)
		return isChan
	case *types.Pointer:
		// Conversion between pointers with identical base types?
		_, isPtr := ut_src.(*types.Pointer)
		return isPtr
	default:
		return false
	}
}
// emitConv emits to f code to convert Value val to exactly type typ,
// and returns the converted value. Implicit conversions are required
// by language assignability rules in assignments, parameter passing,
// etc.
//
// The cases are tried in order: no-op, pure type change, interface
// conversion/construction, constant conversion, slice-to-array-pointer,
// and finally a representation-changing Convert. Panics if no case
// applies.
//
func emitConv(f *Function, val Value, t_dst types.Type, source ast.Node) Value {
	t_src := val.Type()

	// Identical types? Conversion is a no-op.
	if types.Identical(t_src, t_dst) {
		return val
	}

	ut_dst := t_dst.Underlying()
	ut_src := t_src.Underlying()

	tset_dst := typeutil.NewTypeSet(ut_dst)
	tset_src := typeutil.NewTypeSet(ut_src)

	// Just a change of type, but not value or representation?
	// (For type parameters, this must hold for every pair of terms.)
	if tset_src.All(func(termSrc *typeparams.Term) bool {
		return tset_dst.All(func(termDst *typeparams.Term) bool {
			return isValuePreserving(termSrc.Type().Underlying(), termDst.Type().Underlying())
		})
	}) {
		c := &ChangeType{X: val}
		c.setType(t_dst)
		return f.emit(c, source)
	}

	// Conversion to, or construction of a value of, an interface type?
	if _, ok := ut_dst.(*types.Interface); ok && !typeparams.IsTypeParam(t_dst) {
		// Assignment from one interface type to another?
		if _, ok := ut_src.(*types.Interface); ok && !typeparams.IsTypeParam(t_src) {
			c := &ChangeInterface{X: val}
			c.setType(t_dst)
			return f.emit(c, source)
		}

		// Untyped nil constant? Return interface-typed nil constant.
		if ut_src == tUntypedNil {
			return emitConst(f, nilConst(t_dst))
		}

		// Convert (non-nil) "untyped" literals to their default type.
		if t, ok := ut_src.(*types.Basic); ok && t.Info()&types.IsUntyped != 0 {
			val = emitConv(f, val, types.Default(ut_src), source)
		}

		// Boxing requires the method set of the boxed type.
		f.Pkg.Prog.needMethodsOf(val.Type())
		mi := &MakeInterface{X: val}
		mi.setType(t_dst)
		return f.emit(mi, source)
	}

	// Conversion of a compile-time constant value? Note that converting a constant to a type parameter never results in
	// a constant value.
	if c, ok := val.(*Const); ok {
		if _, ok := ut_dst.(*types.Basic); ok || c.IsNil() {
			// Conversion of a compile-time constant to
			// another constant type results in a new
			// constant of the destination type and
			// (initially) the same abstract value.
			// We don't truncate the value yet.
			return emitConst(f, NewConst(c.Value, t_dst))
		}

		// We're converting from constant to non-constant type,
		// e.g. string -> []byte/[]rune.
	}

	// Conversion from slice to array pointer?
	if tset_src.All(func(termSrc *typeparams.Term) bool {
		return tset_dst.All(func(termDst *typeparams.Term) bool {
			if slice, ok := termSrc.Type().Underlying().(*types.Slice); ok {
				if ptr, ok := termDst.Type().Underlying().(*types.Pointer); ok {
					if arr, ok := ptr.Elem().Underlying().(*types.Array); ok && types.Identical(slice.Elem(), arr.Elem()) {
						return true
					}
				}
			}
			return false
		})
	}) {
		c := &SliceToArrayPointer{X: val}
		c.setType(t_dst)
		return f.emit(c, source)
	}

	// A representation-changing conversion?
	// At least one of {ut_src,ut_dst} must be *Basic.
	// (The other may be []byte or []rune.)
	ok1 := tset_src.Any(func(term *typeparams.Term) bool { _, ok := term.Type().Underlying().(*types.Basic); return ok })
	ok2 := tset_dst.Any(func(term *typeparams.Term) bool { _, ok := term.Type().Underlying().(*types.Basic); return ok })
	if ok1 || ok2 {
		c := &Convert{X: val}
		c.setType(t_dst)
		return f.emit(c, source)
	}

	panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), t_dst))
}
// emitStore emits to f an instruction to store value val at location
// addr, applying implicit conversions as required by assignability rules.
//
// addr must have pointer type; val is converted to its element type.
//
func emitStore(f *Function, addr, val Value, source ast.Node) *Store {
	s := &Store{
		Addr: addr,
		Val:  emitConv(f, val, deref(addr.Type()), source),
	}
	// make sure we call getMem after the call to emitConv, which may
	// itself update the memory state
	f.emit(s, source)
	return s
}
// emitJump emits to f a jump to target, and updates the control-flow graph.
// Postcondition: f.currentBlock is nil.
//
func emitJump(f *Function, target *BasicBlock, source ast.Node) *Jump {
	cur := f.currentBlock
	jump := new(Jump)
	cur.emit(jump, source)
	addEdge(cur, target)
	f.currentBlock = nil
	return jump
}
// emitIf emits to f a conditional jump to tblock or fblock based on
// cond, and updates the control-flow graph.
// Postcondition: f.currentBlock is nil.
//
func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock, source ast.Node) *If {
	cur := f.currentBlock
	branch := &If{Cond: cond}
	cur.emit(branch, source)
	addEdge(cur, tblock)
	addEdge(cur, fblock)
	f.currentBlock = nil
	return branch
}
// emitExtract emits to f an instruction to extract the index'th
// component of tuple. It returns the extracted value.
//
func emitExtract(f *Function, tuple Value, index int, source ast.Node) Value {
	ex := &Extract{Tuple: tuple, Index: index}
	ex.setType(tuple.Type().(*types.Tuple).At(index).Type())
	return f.emit(ex, source)
}
// emitTypeAssert emits to f a type assertion value := x.(t) and
// returns the value. x.Type() must be an interface.
//
func emitTypeAssert(f *Function, x Value, t types.Type, source ast.Node) Value {
	assert := &TypeAssert{X: x, AssertedType: t}
	assert.setType(t)
	return f.emit(assert, source)
}
// emitTypeTest emits to f a type test value,ok := x.(t) and returns
// a (value, ok) tuple. x.Type() must be an interface.
func emitTypeTest(f *Function, x Value, t types.Type, source ast.Node) Value {
	test := &TypeAssert{X: x, AssertedType: t, CommaOk: true}
	// The result is a (value T, ok bool) tuple.
	test.setType(types.NewTuple(newVar("value", t), varOk))
	return f.emit(test, source)
}
// emitTailCall emits to f a function call in tail position. The
// caller is responsible for all fields of 'call' except its type.
// Intended for wrapper methods.
// Precondition: f does/will not use deferred procedure calls.
// Postcondition: f.currentBlock is nil.
func emitTailCall(f *Function, call *Call, source ast.Node) {
	tresults := f.Signature.Results()
	nr := tresults.Len()
	// A single result is unwrapped from its tuple; zero or multiple
	// results keep the tuple type.
	if nr == 1 {
		call.typ = tresults.At(0).Type()
	} else {
		call.typ = tresults
	}
	tuple := f.emit(call, source)
	var ret Return
	switch nr {
	case 0:
		// no-op
	case 1:
		ret.Results = []Value{tuple}
	default:
		// Multiple results: extract each tuple component in order.
		for i := 0; i < nr; i++ {
			v := emitExtract(f, tuple, i, source)
			// TODO(adonovan): in principle, this is required:
			//   v = emitConv(f, o.Type, f.Signature.Results[i].Type)
			// but in practice emitTailCall is only used when
			// the types exactly match.
			ret.Results = append(ret.Results, v)
		}
	}
	// Route the return through a fresh exit block.
	f.Exit = f.newBasicBlock("exit")
	emitJump(f, f.Exit, source)
	f.currentBlock = f.Exit
	f.emit(&ret, source)
	f.currentBlock = nil
}
// emitImplicitSelections emits to f code to apply the sequence of
// implicit field selections specified by indices to base value v, and
// returns the selected value.
//
// If v is the address of a struct, the result will be the address of
// a field; if it is the value of a struct, the result will be the
// value of a field.
func emitImplicitSelections(f *Function, v Value, indices []int, source ast.Node) Value {
	for _, idx := range indices {
		// We may have a generic type containing a pointer, or a pointer to a generic type containing a struct. A
		// pointer to a generic containing a pointer to a struct shouldn't be possible because the outer pointer gets
		// dereferenced implicitly before we get here.
		fld := typeutil.CoreType(deref(v.Type())).Underlying().(*types.Struct).Field(idx)
		if !isPointer(v.Type()) {
			// Value selection: produce the field's value directly.
			sel := &Field{X: v, Field: idx}
			sel.setType(fld.Type())
			v = f.emit(sel, source)
			continue
		}
		// Address selection: produce the field's address.
		addr := &FieldAddr{X: v, Field: idx}
		addr.setType(types.NewPointer(fld.Type()))
		v = f.emit(addr, source)
		// Load the field's value iff indirectly embedded.
		if isPointer(fld.Type()) {
			v = emitLoad(f, v, source)
		}
	}
	return v
}
// emitFieldSelection emits to f code to select the index'th field of v.
//
// If wantAddr, the input must be a pointer-to-struct and the result
// will be the field's address; otherwise the result will be the
// field's value.
// Ident id is used for position and debug info.
func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value {
	// We may have a generic type containing a pointer, or a pointer to a generic type containing a struct. A
	// pointer to a generic containing a pointer to a struct shouldn't be possible because the outer pointer gets
	// dereferenced implicitly before we get here.
	structType := typeutil.CoreType(deref(v.Type())).Underlying().(*types.Struct)
	fld := structType.Field(index)
	if isPointer(v.Type()) {
		addr := &FieldAddr{X: v, Field: index}
		addr.setSource(id)
		addr.setType(types.NewPointer(fld.Type()))
		v = f.emit(addr, id)
		// Load the field's value iff we don't want its address.
		if !wantAddr {
			v = emitLoad(f, v, id)
		}
	} else {
		sel := &Field{X: v, Field: index}
		sel.setSource(id)
		sel.setType(fld.Type())
		v = f.emit(sel, id)
	}
	emitDebugRef(f, id, v, wantAddr)
	return v
}
// zeroValue emits to f code to produce a zero value of type t,
// and returns it.
//
// NOTE(review): the source argument is currently unused; the zero
// value is emitted as a constant with no associated syntax.
func zeroValue(f *Function, t types.Type, source ast.Node) Value {
	return emitConst(f, zeroConst(t))
}
// emitConst registers constant c with function f, recording it in f's
// set of constants, and returns c unchanged.
func emitConst(f *Function, c Constant) Constant {
	f.consts = append(f.consts, c)
	return c
}

356
vendor/honnef.co/go/tools/go/ir/exits.go vendored Normal file
View File

@@ -0,0 +1,356 @@
package ir
import (
"go/types"
)
// buildExits computes fn.NoReturn: whether fn always exits the
// process, always unwinds the stack, or never returns. Well-known
// functions from the runtime and popular logging packages are
// special-cased by name; for all other functions the property is
// derived from fn's control-flow graph.
func (b *builder) buildExits(fn *Function) {
	// First, special-case well-known functions by package path and name.
	if obj := fn.Object(); obj != nil {
		switch obj.Pkg().Path() {
		case "runtime":
			switch obj.Name() {
			case "exit":
				fn.NoReturn = AlwaysExits
				return
			case "throw":
				fn.NoReturn = AlwaysExits
				return
			case "Goexit":
				fn.NoReturn = AlwaysUnwinds
				return
			}
		case "go.uber.org/zap":
			switch obj.(*types.Func).FullName() {
			case "(*go.uber.org/zap.Logger).Fatal",
				"(*go.uber.org/zap.SugaredLogger).Fatal",
				"(*go.uber.org/zap.SugaredLogger).Fatalw",
				"(*go.uber.org/zap.SugaredLogger).Fatalf":
				// Technically, this method does not unconditionally exit
				// the process. It dynamically calls a function stored in
				// the logger. If the function is nil, it defaults to
				// os.Exit.
				//
				// The main intent of this method is to terminate the
				// process, and that's what the vast majority of people
				// will use it for. We'll happily accept some false
				// negatives to avoid a lot of false positives.
				fn.NoReturn = AlwaysExits
			case "(*go.uber.org/zap.Logger).Panic",
				"(*go.uber.org/zap.SugaredLogger).Panicw",
				"(*go.uber.org/zap.SugaredLogger).Panicf":
				fn.NoReturn = AlwaysUnwinds
				return
			case "(*go.uber.org/zap.Logger).DPanic",
				"(*go.uber.org/zap.SugaredLogger).DPanicf",
				"(*go.uber.org/zap.SugaredLogger).DPanicw":
				// These methods will only panic in development.
			}
		case "github.com/sirupsen/logrus":
			switch obj.(*types.Func).FullName() {
			case "(*github.com/sirupsen/logrus.Logger).Exit":
				// Technically, this method does not unconditionally exit
				// the process. It dynamically calls a function stored in
				// the logger. If the function is nil, it defaults to
				// os.Exit.
				//
				// The main intent of this method is to terminate the
				// process, and that's what the vast majority of people
				// will use it for. We'll happily accept some false
				// negatives to avoid a lot of false positives.
				fn.NoReturn = AlwaysExits
				return
			case "(*github.com/sirupsen/logrus.Logger).Panic",
				"(*github.com/sirupsen/logrus.Logger).Panicf",
				"(*github.com/sirupsen/logrus.Logger).Panicln":
				// These methods will always panic, but that's not
				// statically known from the code alone, because they
				// take a detour through the generic Log methods.
				fn.NoReturn = AlwaysUnwinds
				return
			case "(*github.com/sirupsen/logrus.Entry).Panicf",
				"(*github.com/sirupsen/logrus.Entry).Panicln":
				// Entry.Panic has an explicit panic, but Panicf and
				// Panicln do not, relying fully on the generic Log
				// method.
				fn.NoReturn = AlwaysUnwinds
				return
			case "(*github.com/sirupsen/logrus.Logger).Log",
				"(*github.com/sirupsen/logrus.Logger).Logf",
				"(*github.com/sirupsen/logrus.Logger).Logln":
				// TODO(dh): we cannot handle these cases. Whether they
				// exit or unwind depends on the level, which is set
				// via the first argument. We don't currently support
				// call-site-specific exit information.
			}
		case "github.com/golang/glog":
			switch obj.(*types.Func).FullName() {
			case "github.com/golang/glog.Exit",
				"github.com/golang/glog.ExitDepth",
				"github.com/golang/glog.Exitf",
				"github.com/golang/glog.Exitln",
				"github.com/golang/glog.Fatal",
				"github.com/golang/glog.FatalDepth",
				"github.com/golang/glog.Fatalf",
				"github.com/golang/glog.Fatalln":
				// all of these call os.Exit after logging
				fn.NoReturn = AlwaysExits
			}
		case "k8s.io/klog":
			switch obj.(*types.Func).FullName() {
			case "k8s.io/klog.Exit",
				"k8s.io/klog.ExitDepth",
				"k8s.io/klog.Exitf",
				"k8s.io/klog.Exitln",
				"k8s.io/klog.Fatal",
				"k8s.io/klog.FatalDepth",
				"k8s.io/klog.Fatalf",
				"k8s.io/klog.Fatalln":
				// all of these call os.Exit after logging
				fn.NoReturn = AlwaysExits
			}
		}
	}
	// isRecoverCall reports whether instr is a call to the recover builtin.
	isRecoverCall := func(instr Instruction) bool {
		if instr, ok := instr.(*Call); ok {
			if builtin, ok := instr.Call.Value.(*Builtin); ok {
				if builtin.Name() == "recover" {
					return true
				}
			}
		}
		return false
	}
	// Classify every block: does it contain an instruction that
	// necessarily exits the process, unwinds the goroutine, or does
	// one of the two without us knowing which ("both")?
	both := NewBlockSet(len(fn.Blocks))
	exits := NewBlockSet(len(fn.Blocks))
	unwinds := NewBlockSet(len(fn.Blocks))
	recovers := false
	for _, u := range fn.Blocks {
		for _, instr := range u.Instrs {
		instrSwitch:
			switch instr := instr.(type) {
			case *Defer:
				if recovers {
					// avoid doing extra work, we already know that this function calls recover
					continue
				}
				call := instr.Call.StaticCallee()
				if call == nil {
					// not a static call, so we can't be sure the
					// deferred call isn't calling recover
					recovers = true
					break
				}
				if call.Package() == fn.Package() {
					b.buildFunction(call)
				}
				if len(call.Blocks) == 0 {
					// external function, we don't know what's
					// happening inside it
					//
					// TODO(dh): this includes functions from
					// imported packages, due to how go/analysis
					// works. We could introduce another fact,
					// like we've done for exiting and unwinding.
					recovers = true
					break
				}
				for _, y := range call.Blocks {
					for _, instr2 := range y.Instrs {
						if isRecoverCall(instr2) {
							recovers = true
							break instrSwitch
						}
					}
				}
			case *Panic:
				both.Add(u)
				unwinds.Add(u)
			case CallInstruction:
				switch instr.(type) {
				case *Defer, *Call:
				default:
					continue
				}
				if instr.Common().IsInvoke() {
					// give up
					return
				}
				var call *Function
				switch instr.Common().Value.(type) {
				case *Function, *MakeClosure:
					call = instr.Common().StaticCallee()
				case *Builtin:
					// the only builtins that affect control flow are
					// panic and recover, and we've already handled
					// those
					continue
				default:
					// dynamic dispatch
					return
				}
				// buildFunction is idempotent. if we're part of a
				// (mutually) recursive call chain, then buildFunction
				// will immediately return, and fn.WillExit will be false.
				if call.Package() == fn.Package() {
					b.buildFunction(call)
				}
				switch call.NoReturn {
				case AlwaysExits:
					both.Add(u)
					exits.Add(u)
				case AlwaysUnwinds:
					both.Add(u)
					unwinds.Add(u)
				case NeverReturns:
					both.Add(u)
				}
			}
		}
	}
	// depth-first search trying to find a path to the exit block that
	// doesn't cross any of the blacklisted blocks
	seen := NewBlockSet(len(fn.Blocks))
	var findPath func(root *BasicBlock, bl *BlockSet) bool
	findPath = func(root *BasicBlock, bl *BlockSet) bool {
		if root == fn.Exit {
			return true
		}
		if seen.Has(root) {
			return false
		}
		if bl.Has(root) {
			return false
		}
		seen.Add(root)
		for _, succ := range root.Succs {
			if findPath(succ, bl) {
				return true
			}
		}
		return false
	}
	// findPathEntry resets the seen set before each search; an empty
	// block set trivially admits a path.
	findPathEntry := func(root *BasicBlock, bl *BlockSet) bool {
		if bl.Num() == 0 {
			return true
		}
		seen.Clear()
		return findPath(root, bl)
	}
	if !findPathEntry(fn.Blocks[0], exits) {
		fn.NoReturn = AlwaysExits
	} else if !recovers {
		// Only consider unwinding and "never returns" if we don't
		// call recover. If we do call recover, then panics don't
		// bubble up the stack.
		// TODO(dh): the position of the defer matters. If we
		// unconditionally terminate before we defer a recover, then
		// the recover is ineffective.
		if !findPathEntry(fn.Blocks[0], unwinds) {
			fn.NoReturn = AlwaysUnwinds
		} else if !findPathEntry(fn.Blocks[0], both) {
			fn.NoReturn = NeverReturns
		}
	}
}
// addUnreachables rewrites fn's control flow after calls to functions
// whose NoReturn property (see buildExits) says they never return
// normally: remaining instructions in the block are dropped and the
// block's successors are replaced with Unreachable, a jump to the
// exit block, or a conditional split between the two.
func (b *builder) addUnreachables(fn *Function) {
	// Lazily-created block holding a single Unreachable instruction,
	// shared by all NeverReturns call sites in fn.
	var unreachable *BasicBlock
	for _, bb := range fn.Blocks {
	instrLoop:
		for i, instr := range bb.Instrs {
			if instr, ok := instr.(*Call); ok {
				var call *Function
				switch v := instr.Common().Value.(type) {
				case *Function:
					call = v
				case *MakeClosure:
					call = v.Fn.(*Function)
				}
				if call == nil {
					continue
				}
				if call.Package() == fn.Package() {
					// make sure we have information on all functions in this package
					b.buildFunction(call)
				}
				switch call.NoReturn {
				case AlwaysExits:
					// This call will cause the process to terminate.
					// Remove remaining instructions in the block and
					// replace any control flow with Unreachable.
					for _, succ := range bb.Succs {
						succ.removePred(bb)
					}
					bb.Succs = bb.Succs[:0]
					bb.Instrs = bb.Instrs[:i+1]
					bb.emit(new(Unreachable), instr.Source())
					addEdge(bb, fn.Exit)
					break instrLoop
				case AlwaysUnwinds:
					// This call will cause the goroutine to terminate
					// and defers to run (i.e. a panic or
					// runtime.Goexit). Remove remaining instructions
					// in the block and replace any control flow with
					// an unconditional jump to the exit block.
					for _, succ := range bb.Succs {
						succ.removePred(bb)
					}
					bb.Succs = bb.Succs[:0]
					bb.Instrs = bb.Instrs[:i+1]
					bb.emit(new(Jump), instr.Source())
					addEdge(bb, fn.Exit)
					break instrLoop
				case NeverReturns:
					// This call will either cause the goroutine to
					// terminate, or the process to terminate. Remove
					// remaining instructions in the block and replace
					// any control flow with a conditional jump to
					// either the exit block, or Unreachable.
					for _, succ := range bb.Succs {
						succ.removePred(bb)
					}
					bb.Succs = bb.Succs[:0]
					bb.Instrs = bb.Instrs[:i+1]
					var c Call
					c.Call.Value = &Builtin{
						name: "ir:noreturnWasPanic",
						sig: types.NewSignature(nil,
							types.NewTuple(),
							types.NewTuple(anonVar(types.Typ[types.Bool])),
							false,
						),
					}
					c.setType(types.Typ[types.Bool])
					if unreachable == nil {
						unreachable = fn.newBasicBlock("unreachable")
						unreachable.emit(&Unreachable{}, nil)
						addEdge(unreachable, fn.Exit)
					}
					bb.emit(&c, instr.Source())
					bb.emit(&If{Cond: &c}, instr.Source())
					addEdge(bb, fn.Exit)
					addEdge(bb, unreachable)
					break instrLoop
				}
			}
		}
	}
}

1006
vendor/honnef.co/go/tools/go/ir/func.go vendored Normal file

File diff suppressed because it is too large Load Diff

1124
vendor/honnef.co/go/tools/go/ir/html.go vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,184 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package irutil
// This file defines utility functions for constructing programs in IR form.
import (
"go/ast"
"go/token"
"go/types"
"honnef.co/go/tools/go/ir"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/packages"
)
// Options configures optional behavior of Packages and AllPackages.
type Options struct {
	// Which function, if any, to print in HTML form
	PrintFunc string
}
// Packages creates an IR program for a set of packages.
//
// The packages must have been loaded from source syntax using the
// golang.org/x/tools/go/packages.Load function in LoadSyntax or
// LoadAllSyntax mode.
//
// Packages creates an IR package for each well-typed package in the
// initial list, plus all their dependencies. The resulting list of
// packages corresponds to the list of initial packages, and may contain
// a nil if IR code could not be constructed for the corresponding initial
// package due to type errors.
//
// Code for bodies of functions is not built until Build is called on
// the resulting Program. IR code is constructed only for the initial
// packages with well-typed syntax trees.
//
// The mode parameter controls diagnostics and checking during IR construction.
func Packages(initial []*packages.Package, mode ir.BuilderMode, opts *Options) (*ir.Program, []*ir.Package) {
	// deps=false: only the initial packages get syntax-based IR.
	return doPackages(initial, mode, false, opts)
}
// AllPackages creates an IR program for a set of packages plus all
// their dependencies.
//
// The packages must have been loaded from source syntax using the
// golang.org/x/tools/go/packages.Load function in LoadAllSyntax mode.
//
// AllPackages creates an IR package for each well-typed package in the
// initial list, plus all their dependencies. The resulting list of
// packages corresponds to the list of initial packages, and may contain
// a nil if IR code could not be constructed for the corresponding
// initial package due to type errors.
//
// Code for bodies of functions is not built until Build is called on
// the resulting Program. IR code is constructed for all packages with
// well-typed syntax trees.
//
// The mode parameter controls diagnostics and checking during IR construction.
func AllPackages(initial []*packages.Package, mode ir.BuilderMode, opts *Options) (*ir.Program, []*ir.Package) {
	// deps=true: dependencies also get syntax-based IR.
	return doPackages(initial, mode, true, opts)
}
// doPackages is the shared implementation of Packages and
// AllPackages. When deps is true, syntax is supplied for dependency
// packages as well as the initial ones.
func doPackages(initial []*packages.Package, mode ir.BuilderMode, deps bool, opts *Options) (*ir.Program, []*ir.Package) {
	var fset *token.FileSet
	if len(initial) > 0 {
		fset = initial[0].Fset
	}
	prog := ir.NewProgram(fset, mode)
	if opts != nil {
		prog.PrintFunc = opts.PrintFunc
	}
	initialSet := make(map[*packages.Package]bool, len(initial))
	for _, p := range initial {
		initialSet[p] = true
	}
	irmap := make(map[*packages.Package]*ir.Package)
	packages.Visit(initial, nil, func(p *packages.Package) {
		if p.Types == nil || p.IllTyped {
			// Ill-typed packages get no IR package at all.
			return
		}
		var files []*ast.File
		if deps || initialSet[p] {
			files = p.Syntax
		}
		irmap[p] = prog.CreatePackage(p.Types, files, p.TypesInfo, true)
	})
	var irpkgs []*ir.Package
	for _, p := range initial {
		irpkgs = append(irpkgs, irmap[p]) // may be nil
	}
	return prog, irpkgs
}
// CreateProgram returns a new program in IR form, given a program
// loaded from source. An IR package is created for each transitively
// error-free package of lprog.
//
// Code for bodies of functions is not built until Build is called
// on the result.
//
// The mode parameter controls diagnostics and checking during IR construction.
//
// Deprecated: use golang.org/x/tools/go/packages and the Packages
// function instead; see ir.ExampleLoadPackages.
func CreateProgram(lprog *loader.Program, mode ir.BuilderMode) *ir.Program {
	prog := ir.NewProgram(lprog.Fset, mode)
	for _, info := range lprog.AllPackages {
		// Packages with (transitive) type errors are skipped entirely.
		if info.TransitivelyErrorFree {
			prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable)
		}
	}
	return prog
}
// BuildPackage builds an IR program with IR for a single package.
//
// It populates pkg by type-checking the specified file ASTs. All
// dependencies are loaded using the importer specified by tc, which
// typically loads compiler export data; IR code cannot be built for
// those packages. BuildPackage then constructs an ir.Program with all
// dependency packages created, and builds and returns the IR package
// corresponding to pkg.
//
// The caller must have set pkg.Path() to the import path.
//
// The operation fails if there were any type-checking or import errors.
//
// See ../ir/example_test.go for an example.
func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, files []*ast.File, mode ir.BuilderMode) (*ir.Package, *types.Info, error) {
	if fset == nil {
		panic("no token.FileSet")
	}
	if pkg.Path() == "" {
		panic("package has no import path")
	}
	// Collect the full type information needed by the IR builder.
	info := &types.Info{
		Types:      make(map[ast.Expr]types.TypeAndValue),
		Defs:       make(map[*ast.Ident]types.Object),
		Uses:       make(map[*ast.Ident]types.Object),
		Implicits:  make(map[ast.Node]types.Object),
		Scopes:     make(map[ast.Node]*types.Scope),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}
	if err := types.NewChecker(tc, fset, pkg, info).Files(files); err != nil {
		return nil, nil, err
	}
	prog := ir.NewProgram(fset, mode)
	// Create IR packages for all imports.
	// Order is not significant.
	created := make(map[*types.Package]bool)
	var createAll func(pkgs []*types.Package)
	createAll = func(pkgs []*types.Package) {
		for _, p := range pkgs {
			if !created[p] {
				created[p] = true
				// nil files/info: imports are created from export data only.
				prog.CreatePackage(p, nil, nil, true)
				createAll(p.Imports())
			}
		}
	}
	createAll(pkg.Imports())
	// Create and build the primary package.
	irpkg := prog.CreatePackage(pkg, files, info, false)
	irpkg.Build()
	return irpkg, info, nil
}

View File

@@ -0,0 +1,54 @@
package irutil
import "honnef.co/go/tools/go/ir"
type Loop struct{ *ir.BlockSet }
// FindLoops returns the loops in fn, one per back-edge n→h where the
// header h dominates n. Each Loop contains h, n, and all transitive
// predecessors of n reachable without passing through h. Returns nil
// if fn has no body.
func FindLoops(fn *ir.Function) []Loop {
	if fn.Blocks == nil {
		return nil
	}
	tree := fn.DomPreorder()
	var sets []Loop
	for _, h := range tree {
		for _, n := range h.Preds {
			if !h.Dominates(n) {
				continue
			}
			// n is a back-edge to h
			// h is the loop header
			if n == h {
				// Self-loop: the loop is just the single block.
				set := Loop{ir.NewBlockSet(len(fn.Blocks))}
				set.Add(n)
				sets = append(sets, set)
				continue
			}
			set := Loop{ir.NewBlockSet(len(fn.Blocks))}
			set.Add(h)
			set.Add(n)
			for _, b := range allPredsBut(n, h, nil) {
				set.Add(b)
			}
			sets = append(sets, set)
		}
	}
	return sets
}
// allPredsBut appends to list all transitive predecessors of b,
// excluding but, and returns the extended list. Blocks already in
// list are not added again.
func allPredsBut(b, but *ir.BasicBlock, list []*ir.BasicBlock) []*ir.BasicBlock {
predLoop:
	for _, pred := range b.Preds {
		if pred == but {
			continue
		}
		for _, known := range list {
			// TODO improve big-o complexity of this function
			if pred == known {
				continue predLoop
			}
		}
		list = append(list, pred)
		list = allPredsBut(pred, but, list)
	}
	return list
}

View File

@@ -0,0 +1,32 @@
package irutil
import (
"honnef.co/go/tools/go/ir"
)
// IsStub reports whether a function is a stub. A function is
// considered a stub if it has no instructions or if all it does is
// return a constant value.
func IsStub(fn *ir.Function) bool {
	for _, block := range fn.Blocks {
		for _, instr := range block.Instrs {
			switch instr.(type) {
			case *ir.Const,
				*ir.Panic,
				*ir.Return,
				*ir.DebugRef,
				*ir.Jump:
				// Permitted in a stub: constants have no
				// side-effects; a panic or return built only from
				// constants is still a stub; debug refs carry no
				// behavior; and a jump merely leads to the exit
				// block (or possibly somewhere else that's stubby?).
			default:
				// all other instructions are assumed to do actual work
				return false
			}
		}
	}
	return true
}

View File

@@ -0,0 +1,264 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package irutil
// This file implements discovery of switch and type-switch constructs
// from low-level control flow.
//
// Many techniques exist for compiling a high-level switch with
// constant cases to efficient machine code. The optimal choice will
// depend on the data type, the specific case values, the code in the
// body of each case, and the hardware.
// Some examples:
// - a lookup table (for a switch that maps constants to constants)
// - a computed goto
// - a binary tree
// - a perfect hash
// - a two-level switch (to partition constant strings by their first byte).
import (
"bytes"
"fmt"
"go/token"
"go/types"
"honnef.co/go/tools/go/ir"
)
// A ConstCase represents a single constant comparison.
// It is part of a Switch.
type ConstCase struct {
	Block *ir.BasicBlock // block performing the comparison
	Body  *ir.BasicBlock // body of the case; taken when the comparison succeeds
	Value *ir.Const      // case comparand
}
// A TypeCase represents a single type assertion.
// It is part of a Switch.
type TypeCase struct {
	Block   *ir.BasicBlock // block performing the type assert
	Body    *ir.BasicBlock // body of the case; taken when the assertion succeeds
	Type    types.Type     // case type
	Binding ir.Value       // value bound by this case
}
// A Switch is a logical high-level control flow operation
// (a multiway branch) discovered by analysis of a CFG containing
// only if/else chains. It is not part of the ir.Instruction set.
//
// One of ConstCases and TypeCases has length >= 2;
// the other is nil.
//
// In a value switch, the list of cases may contain duplicate constants.
// A type switch may contain duplicate types, or types assignable
// to an interface type also in the list.
// TODO(adonovan): eliminate such duplicates.
type Switch struct {
	Start      *ir.BasicBlock // block containing start of if/else chain
	X          ir.Value       // the switch operand
	ConstCases []ConstCase    // ordered list of constant comparisons
	TypeCases  []TypeCase     // ordered list of type assertions
	Default    *ir.BasicBlock // successor if all comparisons fail
}
// String renders sw as pseudo-Go switch syntax.
func (sw *Switch) String() string {
	// We represent each block by the String() of its
	// first Instruction, e.g. "print(42:int)".
	var buf bytes.Buffer
	switch {
	case sw.ConstCases != nil:
		fmt.Fprintf(&buf, "switch %s {\n", sw.X.Name())
		for _, cc := range sw.ConstCases {
			fmt.Fprintf(&buf, "case %s: %s\n", cc.Value.Name(), cc.Body.Instrs[0])
		}
	default:
		fmt.Fprintf(&buf, "switch %s.(type) {\n", sw.X.Name())
		for _, tc := range sw.TypeCases {
			fmt.Fprintf(&buf, "case %s %s: %s\n",
				tc.Binding.Name(), tc.Type, tc.Body.Instrs[0])
		}
	}
	if sw.Default != nil {
		fmt.Fprintf(&buf, "default: %s\n", sw.Default.Instrs[0])
	}
	buf.WriteString("}")
	return buf.String()
}
// Switches examines the control-flow graph of fn and returns the
// set of inferred value and type switches. A value switch tests an
// ir.Value for equality against two or more compile-time constant
// values. Switches involving link-time constants (addresses) are
// ignored. A type switch type-asserts an ir.Value against two or
// more types.
//
// The switches are returned in dominance order.
//
// The resulting switches do not necessarily correspond to uses of the
// 'switch' keyword in the source: for example, a single source-level
// switch statement with non-constant cases may result in zero, one or
// many Switches, one per plural sequence of constant cases.
// Switches may even be inferred from if/else- or goto-based control flow.
// (In general, the control flow constructs of the source program
// cannot be faithfully reproduced from the IR.)
func Switches(fn *ir.Function) []Switch {
	// Traverse the CFG in dominance order, so we don't
	// enter an if/else-chain in the middle.
	var switches []Switch
	seen := make(map[*ir.BasicBlock]bool) // TODO(adonovan): opt: use ir.blockSet
	for _, b := range fn.DomPreorder() {
		if x, k := isComparisonBlock(b); x != nil {
			// Block b starts a switch.
			sw := Switch{Start: b, X: x}
			valueSwitch(&sw, k, seen)
			// A chain of fewer than two cases isn't worth reporting.
			if len(sw.ConstCases) > 1 {
				switches = append(switches, sw)
			}
		}
		if y, x, T := isTypeAssertBlock(b); y != nil {
			// Block b starts a type switch.
			sw := Switch{Start: b, X: x}
			typeSwitch(&sw, y, T, seen)
			if len(sw.TypeCases) > 1 {
				switches = append(switches, sw)
			}
		}
	}
	return switches
}
// isSameX reports whether x2 is x1, possibly wrapped in one or more
// sigma nodes.
func isSameX(x1 ir.Value, x2 ir.Value) bool {
	for {
		if x1 == x2 {
			return true
		}
		sigma, ok := x2.(*ir.Sigma)
		if !ok {
			return false
		}
		// Unwrap the sigma node and compare again.
		x2 = sigma.X
	}
}
// valueSwitch extends sw with the chain of constant comparisons
// starting at sw.Start, following the false edge (Succs[1]) of each
// comparison block, and records the block where the chain ends as
// sw.Default.
func valueSwitch(sw *Switch, k *ir.Const, seen map[*ir.BasicBlock]bool) {
	b := sw.Start
	x := sw.X
	for isSameX(sw.X, x) {
		if seen[b] {
			break
		}
		seen[b] = true
		sw.ConstCases = append(sw.ConstCases, ConstCase{
			Block: b,
			Body:  b.Succs[0],
			Value: k,
		})
		b = b.Succs[1]
		// Score the instructions of the candidate block: only the
		// comparison pattern (If + BinOp) and free σ/ϕ/debug nodes
		// are allowed; anything else makes the total exceed 2.
		n := 0
		for _, instr := range b.Instrs {
			switch instr.(type) {
			case *ir.If, *ir.BinOp:
				n++
			case *ir.Sigma, *ir.Phi, *ir.DebugRef:
			default:
				n += 1000
			}
		}
		if n != 2 {
			// Block b contains not just 'if x == k' and σ/ϕ nodes,
			// so it may have side effects that
			// make it unsafe to elide.
			break
		}
		if len(b.Preds) != 1 {
			// Block b has multiple predecessors,
			// so it cannot be treated as a case.
			break
		}
		x, k = isComparisonBlock(b)
	}
	sw.Default = b
}
// typeSwitch extends sw with the chain of type assertions starting at
// sw.Start, following the false edge (Succs[1]) of each assertion
// block, and records the block where the chain ends as sw.Default.
func typeSwitch(sw *Switch, y ir.Value, T types.Type, seen map[*ir.BasicBlock]bool) {
	b := sw.Start
	x := sw.X
	for isSameX(sw.X, x) {
		if seen[b] {
			break
		}
		seen[b] = true
		sw.TypeCases = append(sw.TypeCases, TypeCase{
			Block:   b,
			Body:    b.Succs[0],
			Type:    T,
			Binding: y,
		})
		b = b.Succs[1]
		// Score the instructions of the candidate block: only the
		// assertion pattern (TypeAssert + two Extracts + If) and
		// free σ/ϕ nodes are allowed; anything else makes the total
		// exceed 4.
		n := 0
		for _, instr := range b.Instrs {
			switch instr.(type) {
			case *ir.TypeAssert, *ir.Extract, *ir.If:
				n++
			case *ir.Sigma, *ir.Phi:
			default:
				n += 1000
			}
		}
		if n != 4 {
			// Block b contains not just
			// {TypeAssert; Extract #0; Extract #1; If}
			// so it may have side effects that
			// make it unsafe to elide.
			break
		}
		if len(b.Preds) != 1 {
			// Block b has multiple predecessors,
			// so it cannot be treated as a case.
			break
		}
		y, x, T = isTypeAssertBlock(b)
	}
	sw.Default = b
}
// isComparisonBlock returns the operands (v, k) if a block ends with
// a comparison v==k, where k is a compile-time constant. The constant
// may appear on either side of the comparison. Returns (nil, nil) if
// the block does not match.
func isComparisonBlock(b *ir.BasicBlock) (v ir.Value, k *ir.Const) {
	if n := len(b.Instrs); n >= 2 {
		if i, ok := b.Instrs[n-1].(*ir.If); ok {
			if binop, ok := i.Cond.(*ir.BinOp); ok && binop.Block() == b && binop.Op == token.EQL {
				if k, ok := binop.Y.(*ir.Const); ok {
					return binop.X, k
				}
				if k, ok := binop.X.(*ir.Const); ok {
					return binop.Y, k
				}
			}
		}
	}
	return
}
// isTypeAssertBlock returns the operands (y, x, T) if a block ends with
// a type assertion "if y, ok := x.(T); ok {". Returns zero values if
// the block does not match.
func isTypeAssertBlock(b *ir.BasicBlock) (y, x ir.Value, T types.Type) {
	if n := len(b.Instrs); n >= 4 {
		if i, ok := b.Instrs[n-1].(*ir.If); ok {
			// The If must test the 'ok' component (index 1) of the assertion.
			if ext1, ok := i.Cond.(*ir.Extract); ok && ext1.Block() == b && ext1.Index == 1 {
				if ta, ok := ext1.Tuple.(*ir.TypeAssert); ok && ta.Block() == b {
					// hack: relies upon instruction ordering.
					if ext0, ok := b.Instrs[n-3].(*ir.Extract); ok {
						return ext0, ta.X, ta.AssertedType
					}
				}
			}
		}
	}
	return
}

View File

@@ -0,0 +1,70 @@
package irutil
import (
"go/types"
"honnef.co/go/tools/go/ir"
)
// Terminates reports whether fn is supposed to return, that is if it
// has at least one theoretic path that returns from the function.
// Explicit panics do not count as terminating.
func Terminates(fn *ir.Function) bool {
	if fn.Blocks == nil {
		// assuming that a function terminates is the conservative
		// choice
		return true
	}
	for _, block := range fn.Blocks {
		if _, ok := block.Control().(*ir.Return); ok {
			if len(block.Preds) == 0 {
				return true
			}
			for _, pred := range block.Preds {
				switch ctrl := pred.Control().(type) {
				case *ir.Panic:
					// explicit panics do not count as terminating
				case *ir.If:
					// Check if we got here by receiving from a closed
					// time.Tick channel -- this cannot happen at
					// runtime and thus doesn't constitute termination.
					//
					// Any deviation from the exact pattern
					// "<-time.Tick(...)" means the path is a genuine
					// return, so each failed match reports true.
					ex, ok := ctrl.Cond.(*ir.Extract)
					if !ok {
						return true
					}
					if ex.Index != 1 {
						return true
					}
					recv, ok := ex.Tuple.(*ir.Recv)
					if !ok {
						return true
					}
					call, ok := recv.Chan.(*ir.Call)
					if !ok {
						return true
					}
					callee, ok := call.Common().Value.(*ir.Function)
					if !ok {
						return true
					}
					calleeObj, ok := callee.Object().(*types.Func)
					if !ok {
						return true
					}
					if calleeObj.FullName() != "time.Tick" {
						return true
					}
				default:
					// we've reached the exit block
					return true
				}
			}
		}
	}
	return false
}

View File

@@ -0,0 +1,178 @@
package irutil
import (
"go/types"
"strings"
"honnef.co/go/tools/go/ir"
"honnef.co/go/tools/go/types/typeutil"
)
// Reachable reports whether basic block to can be reached from from
// by following successor edges.
func Reachable(from, to *ir.BasicBlock) bool {
	// Trivial cases: same block, or dominance implies reachability.
	if from == to || from.Dominates(to) {
		return true
	}
	reached := false
	Walk(from, func(b *ir.BasicBlock) bool {
		if b != to {
			return true
		}
		// Found the target; stop descending from here.
		reached = true
		return false
	})
	return reached
}
// Walk visits every basic block reachable from b, calling fn for
// each. If fn returns false for a block, that block's successors are
// not followed (though they may still be reached via other paths).
func Walk(b *ir.BasicBlock, fn func(*ir.BasicBlock) bool) {
	visited := make(map[*ir.BasicBlock]bool)
	stack := []*ir.BasicBlock{b}
	for len(stack) != 0 {
		top := len(stack) - 1
		cur := stack[top]
		stack = stack[:top]
		if visited[cur] {
			continue
		}
		visited[cur] = true
		if fn(cur) {
			stack = append(stack, cur.Succs...)
		}
	}
}
// Vararg returns the values stored into the variadic argument slice
// x, if they can be determined statically: x must slice a fresh
// allocation whose only other referrers are, within x's own block,
// index-address instructions each written by exactly one store (or
// copies of the allocation satisfying the same condition). The second
// result reports whether extraction succeeded.
func Vararg(x *ir.Slice) ([]ir.Value, bool) {
	var out []ir.Value
	alloc, ok := ir.Unwrap(x.X).(*ir.Alloc)
	if !ok {
		return nil, false
	}
	var checkAlloc func(alloc ir.Value) bool
	checkAlloc = func(alloc ir.Value) bool {
		for _, ref := range *alloc.Referrers() {
			if ref == x {
				continue
			}
			// All writes must happen in the same block as the slice.
			if ref.Block() != x.Block() {
				return false
			}
			switch ref := ref.(type) {
			case *ir.IndexAddr:
				idx := ref
				if len(*idx.Referrers()) != 1 {
					return false
				}
				store, ok := (*idx.Referrers())[0].(*ir.Store)
				if !ok {
					return false
				}
				out = append(out, store.Val)
			case *ir.Copy:
				// Follow copies of the allocation recursively.
				if !checkAlloc(ref) {
					return false
				}
			default:
				return false
			}
		}
		return true
	}
	if !checkAlloc(alloc) {
		return nil, false
	}
	return out, true
}
// CallName returns the full name of the statically-called function or
// builtin, or the empty string for invoke-mode (interface method)
// calls and dynamic calls.
func CallName(call *ir.CallCommon) string {
	if call.IsInvoke() {
		return ""
	}
	switch callee := call.Value.(type) {
	case *ir.Function:
		obj, ok := callee.Object().(*types.Func)
		if !ok {
			// e.g. synthetic functions without a types.Func object
			return ""
		}
		return typeutil.FuncName(obj)
	case *ir.Builtin:
		return callee.Name()
	default:
		return ""
	}
}
func IsCallTo(call *ir.CallCommon, name string) bool { return CallName(call) == name }
// IsCallToAny reports whether call is a static call to any of the
// functions or builtins with the given full names.
func IsCallToAny(call *ir.CallCommon, names ...string) bool {
	got := CallName(call)
	for _, want := range names {
		if got == want {
			return true
		}
	}
	return false
}
// FilterDebug returns instr with all DebugRef instructions removed.
func FilterDebug(instr []ir.Instruction) []ir.Instruction {
	var kept []ir.Instruction
	for _, ins := range instr {
		if _, isDebug := ins.(*ir.DebugRef); isDebug {
			continue
		}
		kept = append(kept, ins)
	}
	return kept
}
// IsExample reports whether fn looks like a testable example: its
// name starts with "Example" and it is defined in a _test.go file.
func IsExample(fn *ir.Function) bool {
	if !strings.HasPrefix(fn.Name(), "Example") {
		return false
	}
	file := fn.Prog.Fset.File(fn.Pos())
	return file != nil && strings.HasSuffix(file.Name(), "_test.go")
}
// Flatten recursively returns the underlying value of an ir.Sigma or
// ir.Phi node. If all edges in an ir.Phi node are the same (after
// flattening), the flattened edge will get returned. If flattening is
// not possible, nil is returned.
func Flatten(v ir.Value) ir.Value {
	failed := false
	seen := map[ir.Value]struct{}{}
	var out ir.Value
	var dfs func(v ir.Value)
	dfs = func(v ir.Value) {
		if failed {
			return
		}
		// Guard against cycles through ϕ nodes.
		if _, ok := seen[v]; ok {
			return
		}
		seen[v] = struct{}{}
		switch v := v.(type) {
		case *ir.Sigma:
			dfs(v.X)
		case *ir.Phi:
			for _, e := range v.Edges {
				dfs(e)
			}
		default:
			// A concrete value: every path must flatten to the same one.
			if out == nil {
				out = v
			} else if out != v {
				failed = true
			}
		}
	}
	dfs(v)
	if failed {
		return nil
	}
	return out
}

View File

@@ -0,0 +1,79 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package irutil
import "honnef.co/go/tools/go/ir"
// This file defines utilities for visiting the IR of
// a Program.
//
// TODO(adonovan): test coverage.
// AllFunctions finds and returns the set of functions potentially
// needed by program prog, as determined by a simple linker-style
// reachability algorithm starting from the members and method-sets of
// each package. The result may include anonymous functions and
// synthetic wrappers.
//
// Precondition: all packages are built.
//
func AllFunctions(prog *ir.Program) map[*ir.Function]bool {
	v := &visitor{
		prog: prog,
		seen: map[*ir.Function]bool{},
	}
	v.program()
	return v.seen
}
// visitor computes the set of functions reachable from a program's
// package members and runtime-type method sets.
type visitor struct {
	prog *ir.Program
	seen map[*ir.Function]bool // functions already visited
}

// program seeds the traversal with every package-level function and
// every concrete method of every runtime type.
func (visit *visitor) program() {
	for _, pkg := range visit.prog.AllPackages() {
		for _, mem := range pkg.Members {
			if fn, ok := mem.(*ir.Function); ok {
				visit.function(fn)
			}
		}
	}
	for _, typ := range visit.prog.RuntimeTypes() {
		mset := visit.prog.MethodSets.MethodSet(typ)
		for i := 0; i < mset.Len(); i++ {
			visit.function(visit.prog.MethodValue(mset.At(i)))
		}
	}
}

// function records fn and recurses into every function referenced by
// its instruction operands (anonymous functions, bound methods, ...).
func (visit *visitor) function(fn *ir.Function) {
	if visit.seen[fn] {
		return
	}
	visit.seen[fn] = true
	var buf [10]*ir.Value // avoid alloc in common case
	for _, b := range fn.Blocks {
		for _, instr := range b.Instrs {
			for _, op := range instr.Operands(buf[:0]) {
				if callee, ok := (*op).(*ir.Function); ok {
					visit.function(callee)
				}
			}
		}
	}
}
// MainPackages returns the subset of the specified packages
// named "main" that define a main function.
// The result may include synthetic "testmain" packages.
func MainPackages(pkgs []*ir.Package) []*ir.Package {
	var mains []*ir.Package
	for _, pkg := range pkgs {
		if pkg.Pkg.Name() != "main" {
			continue
		}
		if pkg.Func("main") == nil {
			continue
		}
		mains = append(mains, pkg)
	}
	return mains
}

1357
vendor/honnef.co/go/tools/go/ir/lift.go vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,116 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
// lvalues are the union of addressable expressions and map-index
// expressions.
import (
"go/ast"
"go/types"
)
// An lvalue represents an assignable location that may appear on the
// left-hand side of an assignment. This is a generalization of a
// pointer to permit updates to elements of maps.
//
// Implementations in this file: *address (true pointers), *element
// (map elements, not addressable), blank (the "_" variable).
type lvalue interface {
	store(fn *Function, v Value, source ast.Node) // stores v into the location
	load(fn *Function, source ast.Node) Value // loads the contents of the location
	address(fn *Function) Value // address of the location
	typ() types.Type // returns the type of the location
}
// An address is an lvalue represented by a true pointer.
type address struct {
	addr Value
	expr ast.Expr // source syntax of the value (not address) [debug mode]
}

func (a *address) load(fn *Function, source ast.Node) Value {
	return emitLoad(fn, a.addr, source)
}

func (a *address) store(fn *Function, v Value, source ast.Node) {
	s := emitStore(fn, a.addr, v, source)
	if a.expr == nil {
		return
	}
	// s.Val is v, converted for assignability.
	emitDebugRef(fn, a.expr, s.Val, false)
}

func (a *address) address(fn *Function) Value {
	if a.expr != nil {
		emitDebugRef(fn, a.expr, a.addr, true)
	}
	return a.addr
}

func (a *address) typ() types.Type {
	return deref(a.addr.Type())
}
// An element is an lvalue represented by m[k], the location of an
// element of a map. Such locations are not addressable since pointers
// cannot be formed from them, but they do support load() and store().
type element struct {
	m, k Value      // map and key
	t    types.Type // map element type
}

func (e *element) load(fn *Function, source ast.Node) Value {
	lookup := &MapLookup{X: e.m, Index: e.k}
	lookup.setType(e.t)
	return fn.emit(lookup, source)
}

func (e *element) store(fn *Function, v Value, source ast.Node) {
	fn.emit(&MapUpdate{
		Map:   e.m,
		Key:   e.k,
		Value: emitConv(fn, v, e.t, source),
	}, source)
}

func (e *element) address(fn *Function) Value {
	panic("map elements are not addressable")
}

func (e *element) typ() types.Type {
	return e.t
}
// A blank is a dummy variable whose name is "_".
// It is not reified: loads are illegal and stores are ignored.
type blank struct{}

func (bl blank) load(fn *Function, source ast.Node) Value {
	panic("blank.load is illegal")
}

func (bl blank) store(fn *Function, v Value, source ast.Node) {
	fn.emit(&BlankStore{Val: v}, source)
}

func (bl blank) address(fn *Function) Value {
	panic("blank var is not addressable")
}

func (bl blank) typ() types.Type {
	// This should be the type of the blank Ident; the typechecker
	// doesn't provide this yet, but fortunately, we don't need it
	// yet either.
	panic("blank.typ is unimplemented")
}

View File

@@ -0,0 +1,248 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
// This file defines utilities for population of method sets.
import (
"fmt"
"go/types"
"honnef.co/go/tools/analysis/lint"
"golang.org/x/exp/typeparams"
)
// MethodValue returns the Function implementing method sel, building
// wrapper methods on demand. It returns nil if sel denotes an
// abstract (interface) method.
//
// Precondition: sel.Kind() == MethodVal.
//
// Thread-safe.
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
//
func (prog *Program) MethodValue(sel *types.Selection) *Function {
	if sel.Kind() != types.MethodVal {
		panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel))
	}
	T := sel.Recv()
	if isInterface(T) {
		return nil // abstract method
	}
	if prog.mode&LogSource != 0 {
		// logStack returns a cleanup func; the extra () defers it.
		defer logStack("MethodValue %s %v", T, sel)()
	}
	// Serialize access to prog.methodSets (via createMethodSet/addMethod).
	prog.methodsMu.Lock()
	defer prog.methodsMu.Unlock()
	return prog.addMethod(prog.createMethodSet(T), sel)
}
// LookupMethod returns the implementation of the method of type T
// identified by (pkg, name). It returns nil if the method exists but
// is abstract, and panics if T has no such method.
func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function {
	mset := prog.MethodSets.MethodSet(T)
	sel := mset.Lookup(pkg, name)
	if sel == nil {
		panic(fmt.Sprintf("%s has no method %s", T, types.Id(pkg, name)))
	}
	return prog.MethodValue(sel)
}
// methodSet contains the (concrete) methods of a non-interface type.
type methodSet struct {
	mapping map[string]*Function // populated lazily, keyed by types.Object.Id
	complete bool // mapping contains all methods
}
// createMethodSet returns the methodSet for T, creating and
// registering an empty one if none exists yet.
//
// Precondition: !isInterface(T).
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
func (prog *Program) createMethodSet(T types.Type) *methodSet {
	if mset, ok := prog.methodSets.At(T).(*methodSet); ok {
		return mset
	}
	mset := &methodSet{mapping: make(map[string]*Function)}
	prog.methodSets.Set(T, mset)
	return mset
}
// addMethod returns the Function implementing sel, consulting and
// populating mset's cache. A synthetic wrapper is built when the
// method is promoted (embedded field path longer than 1) or needs
// receiver indirection; otherwise the declared function is used.
//
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function {
	if sel.Kind() == types.MethodExpr {
		panic(sel)
	}
	id := sel.Obj().Id()
	fn := mset.mapping[id]
	if fn == nil {
		obj := sel.Obj().(*types.Func)
		// Promotion: the selection traverses at least one embedded field.
		needsPromotion := len(sel.Index()) > 1
		// Indirection: value-receiver method selected on a pointer receiver.
		needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.Recv())
		if needsPromotion || needsIndirection {
			fn = makeWrapper(prog, sel)
		} else {
			fn = prog.declaredFunc(obj)
		}
		if fn.Signature.Recv() == nil {
			panic(fn) // missing receiver
		}
		mset.mapping[id] = fn
	}
	return fn
}
// RuntimeTypes returns a new unordered slice containing all
// concrete types in the program for which a complete (non-empty)
// method set is required at run-time.
//
// Thread-safe.
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
func (prog *Program) RuntimeTypes() []types.Type {
	prog.methodsMu.Lock()
	defer prog.methodsMu.Unlock()
	var res []types.Type
	prog.methodSets.Iterate(func(T types.Type, v interface{}) {
		if ms := v.(*methodSet); ms.complete {
			res = append(res, T)
		}
	})
	return res
}
// declaredFunc returns the concrete function/method denoted by obj.
// Panic ensues if there is none.
func (prog *Program) declaredFunc(obj *types.Func) *Function {
	// Redundant else-after-return removed; the happy path now stays
	// left-aligned (the kind of warning staticcheck reports).
	if origin := typeparams.OriginMethod(obj); origin != obj {
		// Calling method on instantiated type, create a wrapper that calls the generic type's method
		base := prog.packageLevelValue(origin)
		return makeInstance(prog, base.(*Function), obj.Type().(*types.Signature), nil)
	}
	if v := prog.packageLevelValue(obj); v != nil {
		return v.(*Function)
	}
	panic("no concrete method: " + obj.String())
}
// needMethodsOf ensures that runtime type information (including the
// complete method set) is available for the specified type T and all
// its subcomponents.
//
// needMethodsOf must be called for at least every type that is an
// operand of some MakeInterface instruction, and for the type of
// every exported package member.
//
// Precondition: T is not a method signature (*Signature with Recv()!=nil).
//
// Thread-safe. (Called via emitConv from multiple builder goroutines.)
//
// TODO(adonovan): make this faster. It accounts for 20% of SSA build time.
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
//
func (prog *Program) needMethodsOf(T types.Type) {
	prog.methodsMu.Lock()
	// NOTE(review): Unlock is not deferred here; a panic inside
	// needMethods would leave the mutex held.
	prog.needMethods(T, false)
	prog.methodsMu.Unlock()
}
// Precondition: T is not a method signature (*Signature with Recv()!=nil).
// Recursive case: skip => don't create methods for T.
//
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
//
func (prog *Program) needMethods(T types.Type, skip bool) {
	// Each package maintains its own set of types it has visited.
	if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok {
		// needMethods(T) was previously called
		if !prevSkip || skip {
			return // already seen, with same or false 'skip' value
		}
	}
	prog.runtimeTypes.Set(T, skip)
	tmset := prog.MethodSets.MethodSet(T)
	if !skip && !isInterface(T) && tmset.Len() > 0 {
		// Create methods of T.
		mset := prog.createMethodSet(T)
		if !mset.complete {
			mset.complete = true
			n := tmset.Len()
			for i := 0; i < n; i++ {
				prog.addMethod(mset, tmset.At(i))
			}
		}
	}
	// Recursion over signatures of each method.
	for i := 0; i < tmset.Len(); i++ {
		sig := tmset.At(i).Type().(*types.Signature)
		prog.needMethods(sig.Params(), false)
		prog.needMethods(sig.Results(), false)
	}
	// Recurse over the structural components of T.
	switch t := T.(type) {
	case *types.Basic:
		// nop
	case *types.Interface, *typeparams.TypeParam:
		// nop---handled by recursion over method set.
	case *types.Pointer:
		prog.needMethods(t.Elem(), false)
	case *types.Slice:
		prog.needMethods(t.Elem(), false)
	case *types.Chan:
		prog.needMethods(t.Elem(), false)
	case *types.Map:
		prog.needMethods(t.Key(), false)
		prog.needMethods(t.Elem(), false)
	case *types.Signature:
		if t.Recv() != nil {
			panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv()))
		}
		prog.needMethods(t.Params(), false)
		prog.needMethods(t.Results(), false)
	case *types.Named:
		// A pointer-to-named type can be derived from a named
		// type via reflection. It may have methods too.
		prog.needMethods(types.NewPointer(t), false)
		// Consider 'type T struct{S}' where S has methods.
		// Reflection provides no way to get from T to struct{S},
		// only to S, so the method set of struct{S} is unwanted,
		// so set 'skip' flag during recursion.
		prog.needMethods(t.Underlying(), true)
	case *types.Array:
		prog.needMethods(t.Elem(), false)
	case *types.Struct:
		for i, n := 0, t.NumFields(); i < n; i++ {
			prog.needMethods(t.Field(i).Type(), false)
		}
	case *types.Tuple:
		for i, n := 0, t.Len(); i < n; i++ {
			prog.needMethods(t.At(i).Type(), false)
		}
	default:
		lint.ExhaustiveTypeSwitch(T)
	}
}

105
vendor/honnef.co/go/tools/go/ir/mode.go vendored Normal file
View File

@@ -0,0 +1,105 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
// This file defines the BuilderMode type and its command-line flag.
import (
"bytes"
"fmt"
)
// BuilderMode is a bitmask of options for diagnostics and checking.
//
// *BuilderMode satisfies the flag.Value interface. Example:
//
//	var mode = ir.BuilderMode(0)
//	func init() { flag.Var(&mode, "build", ir.BuilderModeDoc) }
type BuilderMode uint

const (
	PrintPackages BuilderMode = 1 << iota // Print package inventory to stdout
	PrintFunctions // Print function IR code to stdout
	PrintSource // Print source code when printing function IR
	LogSource // Log source locations as IR builder progresses
	SanityCheckFunctions // Perform sanity checking of function bodies
	NaiveForm // Build naïve IR form: don't replace local loads/stores with registers
	GlobalDebug // Enable debug info for all packages
	SplitAfterNewInformation // Split live range after we learn something new about a value
)

const BuilderModeDoc = `Options controlling the IR builder.
The value is a sequence of zero or more of these symbols:
C	perform sanity [C]hecking of the IR form.
D	include [D]ebug info for every function.
P	print [P]ackage inventory.
F	print [F]unction IR code.
A	print [A]ST nodes responsible for IR instructions
S	log [S]ource locations as IR builder progresses.
N	build [N]aive IR form: don't replace local loads/stores with registers.
I	Split live range after a value is used as slice or array index
`

// modeLetters maps each BuilderMode bit to its flag letter, in the
// canonical output order used by String.
var modeLetters = []struct {
	bit    BuilderMode
	letter byte
}{
	{GlobalDebug, 'D'},
	{PrintPackages, 'P'},
	{PrintFunctions, 'F'},
	{PrintSource, 'A'},
	{LogSource, 'S'},
	{SanityCheckFunctions, 'C'},
	{NaiveForm, 'N'},
	{SplitAfterNewInformation, 'I'},
}

// String renders m as its sequence of option letters.
func (m BuilderMode) String() string {
	var buf bytes.Buffer
	for _, f := range modeLetters {
		if m&f.bit != 0 {
			buf.WriteByte(f.letter)
		}
	}
	return buf.String()
}

// Set parses the flag characters in s and updates *m.
func (m *BuilderMode) Set(s string) error {
	var mode BuilderMode
	for _, c := range s {
		var bit BuilderMode
		switch c {
		case 'D':
			bit = GlobalDebug
		case 'P':
			bit = PrintPackages
		case 'F':
			bit = PrintFunctions
		case 'A':
			bit = PrintSource
		case 'S':
			bit = LogSource
		case 'C':
			bit = SanityCheckFunctions
		case 'N':
			bit = NaiveForm
		case 'I':
			bit = SplitAfterNewInformation
		default:
			return fmt.Errorf("unknown BuilderMode option: %q", c)
		}
		mode |= bit
	}
	*m = mode
	return nil
}

// Get returns m.
func (m BuilderMode) Get() interface{} { return m }

482
vendor/honnef.co/go/tools/go/ir/print.go vendored Normal file
View File

@@ -0,0 +1,482 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
// This file implements the String() methods for all Value and
// Instruction types.
import (
"bytes"
"fmt"
"go/types"
"io"
"reflect"
"sort"
"honnef.co/go/tools/go/types/typeutil"
)
// relName returns the name of v relative to i.
// In most cases, this is identical to v.Name(), but references to
// Functions (including methods) and Globals use RelString and
// all types are displayed with relType, so that only cross-package
// references are package-qualified.
func relName(v Value, i Instruction) string {
	if v == nil {
		return "<nil>"
	}
	var from *types.Package
	if i != nil {
		from = i.Parent().pkg()
	}
	if m, ok := v.(Member); ok { // *Function or *Global
		return m.RelString(from)
	}
	return v.Name()
}
func relType(t types.Type, from *types.Package) string {
return types.TypeString(t, types.RelativeTo(from))
}
// relString returns m's name, package-qualified unless m belongs to
// package from.
func relString(m Member, from *types.Package) string {
	// NB: not all globals have an Object (e.g. init$guard),
	// so use Package().Object not Object.Package().
	pkg := m.Package().Pkg
	if pkg == nil || pkg == from {
		return m.Name()
	}
	return fmt.Sprintf("%s.%s", pkg.Path(), m.Name())
}
// Value.String()
//
// This method is provided only for debugging.
// It never appears in disassembly, which uses Value.Name().
func (v *Parameter) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("Parameter <%s> {%s}", relType(v.Type(), from), v.name)
}
func (v *FreeVar) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("FreeVar <%s> %s", relType(v.Type(), from), v.Name())
}
func (v *Builtin) String() string {
	return fmt.Sprintf("Builtin %s", v.Name())
}
// Instruction.String()
func (v *Alloc) String() string {
	from := v.Parent().pkg()
	// The mnemonic distinguishes stack from heap allocations.
	storage := "Stack"
	if v.Heap {
		storage = "Heap"
	}
	return fmt.Sprintf("%sAlloc <%s>", storage, relType(v.Type(), from))
}
func (v *Sigma) String() string {
	from := v.Parent().pkg()
	s := fmt.Sprintf("Sigma <%s> [b%d] %s", relType(v.Type(), from), v.From.Index, v.X.Name())
	return s
}
func (v *Phi) String() string {
	var b bytes.Buffer
	fmt.Fprintf(&b, "Phi <%s>", v.Type())
	// Each edge is printed as "pred-block-index:value".
	for i, edge := range v.Edges {
		b.WriteString(" ")
		// Be robust against malformed CFG.
		if v.block == nil {
			b.WriteString("??")
			continue
		}
		block := -1
		if i < len(v.block.Preds) {
			block = v.block.Preds[i].Index
		}
		fmt.Fprintf(&b, "%d:", block)
		edgeVal := "<nil>" // be robust
		if edge != nil {
			edgeVal = relName(edge, v)
		}
		b.WriteString(edgeVal)
	}
	return b.String()
}
// printCall formats the shared portion of call-like instructions
// (Call, Go, Defer); prefix selects the mnemonic. instr may be nil
// (see CallCommon.String), in which case no type is printed.
func printCall(v *CallCommon, prefix string, instr Instruction) string {
	var b bytes.Buffer
	if !v.IsInvoke() {
		if value, ok := instr.(Value); ok {
			fmt.Fprintf(&b, "%s <%s> %s", prefix, relType(value.Type(), instr.Parent().pkg()), relName(v.Value, instr))
		} else {
			fmt.Fprintf(&b, "%s %s", prefix, relName(v.Value, instr))
		}
	} else {
		// Interface method invocation: print receiver.method.
		if value, ok := instr.(Value); ok {
			fmt.Fprintf(&b, "%sInvoke <%s> %s.%s", prefix, relType(value.Type(), instr.Parent().pkg()), relName(v.Value, instr), v.Method.Name())
		} else {
			fmt.Fprintf(&b, "%sInvoke %s.%s", prefix, relName(v.Value, instr), v.Method.Name())
		}
	}
	for _, arg := range v.TypeArgs {
		b.WriteString(" ")
		b.WriteString(relType(arg, instr.Parent().pkg()))
	}
	for _, arg := range v.Args {
		b.WriteString(" ")
		b.WriteString(relName(arg, instr))
	}
	return b.String()
}
func (c *CallCommon) String() string {
	return printCall(c, "", nil)
}
func (v *Call) String() string {
	return printCall(&v.Call, "Call", v)
}
func (v *BinOp) String() string {
	return fmt.Sprintf("BinOp <%s> {%s} %s %s", relType(v.Type(), v.Parent().pkg()), v.Op.String(), relName(v.X, v), relName(v.Y, v))
}
func (v *UnOp) String() string {
	return fmt.Sprintf("UnOp <%s> {%s} %s", relType(v.Type(), v.Parent().pkg()), v.Op.String(), relName(v.X, v))
}
func (v *Load) String() string {
	return fmt.Sprintf("Load <%s> %s", relType(v.Type(), v.Parent().pkg()), relName(v.X, v))
}
func (v *Copy) String() string {
	return fmt.Sprintf("Copy <%s> %s", relType(v.Type(), v.Parent().pkg()), relName(v.X, v))
}
// printConv formats the conversion-style instructions below, which all
// share the "Mnemonic <type> operand" shape.
func printConv(prefix string, v, x Value) string {
	from := v.Parent().pkg()
	return fmt.Sprintf("%s <%s> %s",
		prefix,
		relType(v.Type(), from),
		relName(x, v.(Instruction)))
}
func (v *ChangeType) String() string { return printConv("ChangeType", v, v.X) }
func (v *Convert) String() string { return printConv("Convert", v, v.X) }
func (v *ChangeInterface) String() string { return printConv("ChangeInterface", v, v.X) }
func (v *SliceToArrayPointer) String() string { return printConv("SliceToArrayPointer", v, v.X) }
func (v *MakeInterface) String() string { return printConv("MakeInterface", v, v.X) }
// String methods for the Make* family of instructions.
func (v *MakeClosure) String() string {
	from := v.Parent().pkg()
	var b bytes.Buffer
	fmt.Fprintf(&b, "MakeClosure <%s> %s", relType(v.Type(), from), relName(v.Fn, v))
	if v.Bindings != nil {
		// Free-variable bindings follow the function.
		for _, c := range v.Bindings {
			b.WriteString(" ")
			b.WriteString(relName(c, v))
		}
	}
	return b.String()
}
func (v *MakeSlice) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("MakeSlice <%s> %s %s",
		relType(v.Type(), from),
		relName(v.Len, v),
		relName(v.Cap, v))
}
func (v *Slice) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("Slice <%s> %s %s %s %s",
		relType(v.Type(), from), relName(v.X, v), relName(v.Low, v), relName(v.High, v), relName(v.Max, v))
}
func (v *MakeMap) String() string {
	// Reserve (capacity hint) is optional.
	res := ""
	if v.Reserve != nil {
		res = relName(v.Reserve, v)
	}
	from := v.Parent().pkg()
	return fmt.Sprintf("MakeMap <%s> %s", relType(v.Type(), from), res)
}
func (v *MakeChan) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("MakeChan <%s> %s", relType(v.Type(), from), relName(v.Size, v))
}
// String methods for field, index, lookup, range and assert instructions.
func (v *FieldAddr) String() string {
	from := v.Parent().pkg()
	// v.X.Type() might be a pointer to a type parameter whose core type is a pointer to a struct
	st := deref(typeutil.CoreType(deref(v.X.Type()))).Underlying().(*types.Struct)
	// Be robust against a bad index.
	name := "?"
	if 0 <= v.Field && v.Field < st.NumFields() {
		name = st.Field(v.Field).Name()
	}
	return fmt.Sprintf("FieldAddr <%s> [%d] (%s) %s", relType(v.Type(), from), v.Field, name, relName(v.X, v))
}
func (v *Field) String() string {
	st := typeutil.CoreType(v.X.Type()).Underlying().(*types.Struct)
	// Be robust against a bad index.
	name := "?"
	if 0 <= v.Field && v.Field < st.NumFields() {
		name = st.Field(v.Field).Name()
	}
	from := v.Parent().pkg()
	return fmt.Sprintf("Field <%s> [%d] (%s) %s", relType(v.Type(), from), v.Field, name, relName(v.X, v))
}
func (v *IndexAddr) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("IndexAddr <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v))
}
func (v *Index) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("Index <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v))
}
func (v *MapLookup) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("MapLookup <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v))
}
func (v *StringLookup) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("StringLookup <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v))
}
func (v *Range) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("Range <%s> %s", relType(v.Type(), from), relName(v.X, v))
}
func (v *Next) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("Next <%s> %s", relType(v.Type(), from), relName(v.Iter, v))
}
func (v *TypeAssert) String() string {
	from := v.Parent().pkg()
	return fmt.Sprintf("TypeAssert <%s> %s", relType(v.Type(), from), relName(v.X, v))
}
func (v *Extract) String() string {
	from := v.Parent().pkg()
	// Show the name of the extracted tuple component, if it has one.
	name := v.Tuple.Type().(*types.Tuple).At(v.Index).Name()
	return fmt.Sprintf("Extract <%s> [%d] (%s) %s", relType(v.Type(), from), v.Index, name, relName(v.Tuple, v))
}
// String methods for terminator instructions; successor blocks are
// printed as "→ bN" (with -1 for a malformed CFG).
func (s *Jump) String() string {
	// Be robust against malformed CFG.
	block := -1
	if s.block != nil && len(s.block.Succs) == 1 {
		block = s.block.Succs[0].Index
	}
	str := fmt.Sprintf("Jump → b%d", block)
	if s.Comment != "" {
		str = fmt.Sprintf("%s # %s", str, s.Comment)
	}
	return str
}
func (s *Unreachable) String() string {
	// Be robust against malformed CFG.
	block := -1
	if s.block != nil && len(s.block.Succs) == 1 {
		block = s.block.Succs[0].Index
	}
	return fmt.Sprintf("Unreachable → b%d", block)
}
func (s *If) String() string {
	// Be robust against malformed CFG.
	tblock, fblock := -1, -1
	if s.block != nil && len(s.block.Succs) == 2 {
		tblock = s.block.Succs[0].Index
		fblock = s.block.Succs[1].Index
	}
	return fmt.Sprintf("If %s → b%d b%d", relName(s.Cond, s), tblock, fblock)
}
func (s *ConstantSwitch) String() string {
	var b bytes.Buffer
	fmt.Fprintf(&b, "ConstantSwitch %s", relName(s.Tag, s))
	for _, cond := range s.Conds {
		fmt.Fprintf(&b, " %s", relName(cond, s))
	}
	fmt.Fprint(&b, " →")
	for _, succ := range s.block.Succs {
		fmt.Fprintf(&b, " b%d", succ.Index)
	}
	return b.String()
}
func (s *TypeSwitch) String() string {
	from := s.Parent().pkg()
	var b bytes.Buffer
	fmt.Fprintf(&b, "TypeSwitch <%s> %s", relType(s.typ, from), relName(s.Tag, s))
	for _, cond := range s.Conds {
		fmt.Fprintf(&b, " %q", relType(cond, s.block.parent.pkg()))
	}
	return b.String()
}
// String methods for Go/Panic/Return/RunDefers/Send/Recv/Defer.
func (s *Go) String() string {
	return printCall(&s.Call, "Go", s)
}
func (s *Panic) String() string {
	// Be robust against malformed CFG.
	block := -1
	if s.block != nil && len(s.block.Succs) == 1 {
		block = s.block.Succs[0].Index
	}
	return fmt.Sprintf("Panic %s → b%d", relName(s.X, s), block)
}
func (s *Return) String() string {
	var b bytes.Buffer
	b.WriteString("Return")
	for _, r := range s.Results {
		b.WriteString(" ")
		b.WriteString(relName(r, s))
	}
	return b.String()
}
func (*RunDefers) String() string {
	return "RunDefers"
}
func (s *Send) String() string {
	return fmt.Sprintf("Send %s %s", relName(s.Chan, s), relName(s.X, s))
}
func (recv *Recv) String() string {
	from := recv.Parent().pkg()
	return fmt.Sprintf("Recv <%s> %s", relType(recv.Type(), from), relName(recv.Chan, recv))
}
func (s *Defer) String() string {
	return printCall(&s.Call, "Defer", s)
}
// String methods for Select, the store-like instructions, and DebugRef.
func (s *Select) String() string {
	var b bytes.Buffer
	// Render each comm state as "<-ch" (receive) or "ch<-v" (send).
	for i, st := range s.States {
		if i > 0 {
			b.WriteString(", ")
		}
		if st.Dir == types.RecvOnly {
			b.WriteString("<-")
			b.WriteString(relName(st.Chan, s))
		} else {
			b.WriteString(relName(st.Chan, s))
			b.WriteString("<-")
			b.WriteString(relName(st.Send, s))
		}
	}
	non := ""
	if !s.Blocking {
		non = "Non"
	}
	from := s.Parent().pkg()
	return fmt.Sprintf("Select%sBlocking <%s> [%s]", non, relType(s.Type(), from), b.String())
}
func (s *Store) String() string {
	return fmt.Sprintf("Store {%s} %s %s",
		s.Val.Type(), relName(s.Addr, s), relName(s.Val, s))
}
func (s *BlankStore) String() string {
	return fmt.Sprintf("BlankStore %s", relName(s.Val, s))
}
func (s *MapUpdate) String() string {
	return fmt.Sprintf("MapUpdate %s %s %s", relName(s.Map, s), relName(s.Key, s), relName(s.Value, s))
}
func (s *DebugRef) String() string {
	p := s.Parent().Prog.Fset.Position(s.Pos())
	var descr interface{}
	if s.object != nil {
		descr = s.object // e.g. "var x int"
	} else {
		descr = reflect.TypeOf(s.Expr) // e.g. "*ast.CallExpr"
	}
	var addr string
	if s.IsAddr {
		addr = "address of "
	}
	// Printed as a comment line in the disassembly.
	return fmt.Sprintf("; %s%s @ %d:%d is %s", addr, descr, p.Line, p.Column, s.X.Name())
}
func (p *Package) String() string {
	return "package " + p.Pkg.Path()
}
var _ io.WriterTo = (*Package)(nil) // *Package implements io.WriterTo
// WriteTo writes WritePackage's summary of p to w.
func (p *Package) WriteTo(w io.Writer) (int64, error) {
	var buf bytes.Buffer
	WritePackage(&buf, p)
	n, err := w.Write(buf.Bytes())
	return int64(n), err
}
// WritePackage writes to buf a human-readable summary of p: one line
// per member (const/func/type/var), sorted by name, with names padded
// to the longest member name for column alignment.
func WritePackage(buf *bytes.Buffer, p *Package) {
	fmt.Fprintf(buf, "%s:\n", p)
	var names []string
	maxname := 0
	for name := range p.Members {
		if l := len(name); l > maxname {
			maxname = l
		}
		names = append(names, name)
	}
	from := p.Pkg
	sort.Strings(names)
	for _, name := range names {
		switch mem := p.Members[name].(type) {
		case *NamedConst:
			fmt.Fprintf(buf, " const %-*s %s = %s\n",
				maxname, name, mem.Name(), mem.Value.RelString(from))
		case *Function:
			fmt.Fprintf(buf, " func %-*s %s\n",
				maxname, name, relType(mem.Type(), from))
		case *Type:
			fmt.Fprintf(buf, " type %-*s %s\n",
				maxname, name, relType(mem.Type().Underlying(), from))
			// List the type's methods beneath it.
			for _, meth := range typeutil.IntuitiveMethodSet(mem.Type(), &p.Prog.MethodSets) {
				fmt.Fprintf(buf, " %s\n", types.SelectionString(meth, types.RelativeTo(from)))
			}
		case *Global:
			// Globals are addresses; print the pointee type.
			fmt.Fprintf(buf, " var %-*s %s\n",
				maxname, name, relType(mem.Type().(*types.Pointer).Elem(), from))
		}
	}
	fmt.Fprintf(buf, "\n")
}

View File

@@ -0,0 +1,556 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
// An optional pass for sanity-checking invariants of the IR representation.
// Currently it checks CFG invariants but little at the instruction level.
import (
"fmt"
"go/types"
"io"
"os"
"strings"
)
// sanity holds the state of one sanity-checking pass over a function.
type sanity struct {
	reporter io.Writer // destination for diagnostics (os.Stderr by default, see sanityCheck)
	fn *Function // function currently being checked
	block *BasicBlock // block currently being checked; may be nil
	instrs map[Instruction]struct{}
	insane bool // set by errorf when an error-level diagnostic is reported
}
// sanityCheck performs integrity checking of the IR representation
// of the function fn and returns true if it was valid. Diagnostics
// are written to reporter if non-nil, os.Stderr otherwise. Some
// diagnostics are only warnings and do not imply a negative result.
//
// Sanity-checking is intended to facilitate the debugging of code
// transformation passes.
func sanityCheck(fn *Function, reporter io.Writer) bool {
	if reporter == nil {
		reporter = os.Stderr
	}
	s := &sanity{reporter: reporter}
	return s.checkFunction(fn)
}

// mustSanityCheck is like sanityCheck but panics instead of returning
// a negative result.
func mustSanityCheck(fn *Function, reporter io.Writer) {
	if sanityCheck(fn, reporter) {
		return
	}
	fn.WriteTo(os.Stderr)
	panic("SanityCheck failed")
}
// diagnostic writes one formatted message to the reporter, prefixed by
// the severity and the current function/block context.
func (s *sanity) diagnostic(prefix, format string, args ...interface{}) {
	fmt.Fprintf(s.reporter, "%s: function %s", prefix, s.fn)
	if s.block != nil {
		fmt.Fprintf(s.reporter, ", block %s", s.block)
	}
	fmt.Fprintf(s.reporter, ": "+format+"\n", args...)
}

// errorf reports an invariant violation and marks the function insane.
func (s *sanity) errorf(format string, args ...interface{}) {
	s.insane = true
	s.diagnostic("Error", format, args...)
}

// warnf reports a non-fatal diagnostic.
func (s *sanity) warnf(format string, args ...interface{}) {
	s.diagnostic("Warning", format, args...)
}
// findDuplicate returns an arbitrary basic block that appeared more
// than once in blocks, or nil if all were unique.
func findDuplicate(blocks []*BasicBlock) *BasicBlock {
	if len(blocks) < 2 {
		return nil
	}
	// Fast path for the common two-element case.
	if blocks[0] == blocks[1] {
		return blocks[0]
	}
	// Slow path: hash every block.
	seen := make(map[*BasicBlock]bool, len(blocks))
	for _, b := range blocks {
		if seen[b] {
			return b
		}
		seen[b] = true
	}
	return nil
}
// checkInstr validates the non-terminator instruction instr, which
// occurs at index idx of the current block, then performs checks that
// are common to all value-defining instructions.
func (s *sanity) checkInstr(idx int, instr Instruction) {
	switch instr := instr.(type) {
	case *If, *Jump, *Return, *Panic, *Unreachable, *ConstantSwitch:
		s.errorf("control flow instruction not at end of block")
	case *Sigma:
		// Sigma nodes must form a contiguous prefix run.
		if idx > 0 {
			prev := s.block.Instrs[idx-1]
			if _, ok := prev.(*Sigma); !ok {
				s.errorf("Sigma instruction follows a non-Sigma: %T", prev)
			}
		}
	case *Phi:
		if idx == 0 {
			// It suffices to apply this check to just the first phi node.
			if dup := findDuplicate(s.block.Preds); dup != nil {
				s.errorf("phi node in block with duplicate predecessor %s", dup)
			}
		} else {
			// Phis may only follow other phis or sigmas.
			prev := s.block.Instrs[idx-1]
			switch prev.(type) {
			case *Phi, *Sigma:
			default:
				s.errorf("Phi instruction follows a non-Phi, non-Sigma: %T", prev)
			}
		}
		// Edge count must match predecessor count, and no edge may be nil.
		if ne, np := len(instr.Edges), len(s.block.Preds); ne != np {
			s.errorf("phi node has %d edges but %d predecessors", ne, np)
		} else {
			for i, e := range instr.Edges {
				if e == nil {
					s.errorf("phi node '%v' has no value for edge #%d from %s", instr, i, s.block.Preds[i])
				}
			}
		}
	case *Alloc:
		// Stack allocs must be registered in Function.Locals.
		if !instr.Heap {
			found := false
			for _, l := range s.fn.Locals {
				if l == instr {
					found = true
					break
				}
			}
			if !found {
				s.errorf("local alloc %s = %s does not appear in Function.Locals", instr.Name(), instr)
			}
		}
	case *BinOp:
	case *Call:
	case *ChangeInterface:
	case *ChangeType:
	case *SliceToArrayPointer:
	case *Convert:
		if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok {
			if _, ok := instr.Type().Underlying().(*types.Basic); !ok {
				s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type())
			}
		}
	// Instruction kinds with empty cases need no kind-specific checks;
	// they still receive the generic value checks below.
	case *Defer:
	case *Extract:
	case *Field:
	case *FieldAddr:
	case *Go:
	case *Index:
	case *IndexAddr:
	case *MapLookup:
	case *StringLookup:
	case *MakeChan:
	case *MakeClosure:
		numFree := len(instr.Fn.(*Function).FreeVars)
		numBind := len(instr.Bindings)
		if numFree != numBind {
			s.errorf("MakeClosure has %d Bindings for function %s with %d free vars",
				numBind, instr.Fn, numFree)
		}
		if recv := instr.Type().(*types.Signature).Recv(); recv != nil {
			s.errorf("MakeClosure's type includes receiver %s", recv.Type())
		}
	case *MakeInterface:
	case *MakeMap:
	case *MakeSlice:
	case *MapUpdate:
	case *Next:
	case *Range:
	case *RunDefers:
	case *Select:
	case *Send:
	case *Slice:
	case *Store:
	case *TypeAssert:
	case *UnOp:
	case *DebugRef:
	case *BlankStore:
	case *Load:
	case *Parameter:
	case *Const:
	case *AggregateConst:
	case *ArrayConst:
	case *GenericConst:
	case *Recv:
	case *TypeSwitch:
	default:
		panic(fmt.Sprintf("Unknown instruction type: %T", instr))
	}
	if call, ok := instr.(CallInstruction); ok {
		if call.Common().Signature() == nil {
			s.errorf("nil signature: %s", call)
		}
	}
	// Check that value-defining instructions have valid types
	// and a valid referrer list.
	if v, ok := instr.(Value); ok {
		t := v.Type()
		if t == nil {
			s.errorf("no type: %s = %s", v.Name(), v)
		} else if b, ok := t.Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 {
			if _, ok := v.(*Const); !ok {
				s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t)
			}
		}
		s.checkReferrerList(v)
	}
	// Untyped constants are legal as instruction Operands(),
	// for example:
	//   _ = "foo"[0]
	// or:
	//   if wordsize==64 {...}
	// All other non-Instruction Values can be found via their
	// enclosing Function or Package.
}
// checkFinalInstr verifies that instr is a legal block terminator and
// that the block's successor count matches the terminator kind
// (If: 2 distinct targets; Jump/Panic/Unreachable: 1; Return: 0, with
// result arity matching the function signature; ConstantSwitch: any).
func (s *sanity) checkFinalInstr(instr Instruction) {
	nsuccs := len(s.block.Succs)
	switch instr := instr.(type) {
	case *If:
		if nsuccs != 2 {
			s.errorf("If-terminated block has %d successors; expected 2", nsuccs)
			return
		}
		if s.block.Succs[0] == s.block.Succs[1] {
			s.errorf("If-instruction has same True, False target blocks: %s", s.block.Succs[0])
			return
		}
	case *Jump:
		if nsuccs != 1 {
			s.errorf("Jump-terminated block has %d successors; expected 1", nsuccs)
			return
		}
	case *Return:
		if nsuccs != 0 {
			s.errorf("Return-terminated block has %d successors; expected none", nsuccs)
			return
		}
		if na, nf := len(instr.Results), s.fn.Signature.Results().Len(); nf != na {
			s.errorf("%d-ary return in %d-ary function", na, nf)
		}
	case *Panic:
		if nsuccs != 1 {
			s.errorf("Panic-terminated block has %d successors; expected one", nsuccs)
			return
		}
	case *Unreachable:
		if nsuccs != 1 {
			s.errorf("Unreachable-terminated block has %d successors; expected one", nsuccs)
			return
		}
	case *ConstantSwitch:
		// Any number of successors is acceptable.
	default:
		s.errorf("non-control flow instruction at end of block")
	}
}
// checkBlock checks invariants of basic block b, which the caller
// asserts is fn.Blocks[index]: correct Index and parent, dual
// pred/succ edges, non-empty instruction list, and per-instruction
// sanity (operands, referrers, block membership).
func (s *sanity) checkBlock(b *BasicBlock, index int) {
	s.block = b
	if b.Index != index {
		s.errorf("block has incorrect Index %d", b.Index)
	}
	if b.parent != s.fn {
		s.errorf("block has incorrect parent %s", b.parent)
	}
	// Check all blocks are reachable.
	// (The entry block is always implicitly reachable, the exit block may be unreachable.)
	if index > 1 && len(b.Preds) == 0 {
		s.warnf("unreachable block")
		if b.Instrs == nil {
			// Since this block is about to be pruned,
			// tolerating transient problems in it
			// simplifies other optimizations.
			return
		}
	}
	// Check predecessor and successor relations are dual,
	// and that all blocks in CFG belong to same function.
	for _, a := range b.Preds {
		found := false
		for _, bb := range a.Succs {
			if bb == b {
				found = true
				break
			}
		}
		if !found {
			s.errorf("expected successor edge in predecessor %s; found only: %s", a, a.Succs)
		}
		if a.parent != s.fn {
			s.errorf("predecessor %s belongs to different function %s", a, a.parent)
		}
	}
	for _, c := range b.Succs {
		found := false
		for _, bb := range c.Preds {
			if bb == b {
				found = true
				break
			}
		}
		if !found {
			s.errorf("expected predecessor edge in successor %s; found only: %s", c, c.Preds)
		}
		if c.parent != s.fn {
			s.errorf("successor %s belongs to different function %s", c, c.parent)
		}
	}
	// Check each instruction is sane.
	n := len(b.Instrs)
	if n == 0 {
		s.errorf("basic block contains no instructions")
	}
	var rands [10]*Value // reuse storage across Operands calls
	for j, instr := range b.Instrs {
		if instr == nil {
			s.errorf("nil instruction at index %d", j)
			continue
		}
		if b2 := instr.Block(); b2 == nil {
			s.errorf("nil Block() for instruction at index %d", j)
			continue
		} else if b2 != b {
			s.errorf("wrong Block() (%s) for instruction at index %d ", b2, j)
			continue
		}
		// The last instruction must be a terminator; all others must not be.
		if j < n-1 {
			s.checkInstr(j, instr)
		} else {
			s.checkFinalInstr(instr)
		}
		// Check Instruction.Operands.
	operands:
		for i, op := range instr.Operands(rands[:0]) {
			if op == nil {
				s.errorf("nil operand pointer %d of %s", i, instr)
				continue
			}
			val := *op
			if val == nil {
				continue // a nil operand is ok
			}
			// Check that "untyped" types only appear on constant operands.
			if _, ok := (*op).(*Const); !ok {
				if basic, ok := (*op).Type().(*types.Basic); ok {
					if basic.Info()&types.IsUntyped != 0 {
						s.errorf("operand #%d of %s is untyped: %s", i, instr, basic)
					}
				}
			}
			// Check that Operands that are also Instructions belong to same function.
			// TODO(adonovan): also check their block dominates block b.
			if val, ok := val.(Instruction); ok {
				if val.Block() == nil {
					s.errorf("operand %d of %s is an instruction (%s) that belongs to no block", i, instr, val)
				} else if val.Parent() != s.fn {
					s.errorf("operand %d of %s is an instruction (%s) from function %s", i, instr, val, val.Parent())
				}
			}
			// Check that each function-local operand of
			// instr refers back to instr. (NB: quadratic)
			switch val := val.(type) {
			case *Const, *Global, *Builtin:
				continue // not local
			case *Function:
				if val.parent == nil {
					continue // only anon functions are local
				}
			}
			// TODO(adonovan): check val.Parent() != nil <=> val.Referrers() is defined.
			if refs := val.Referrers(); refs != nil {
				for _, ref := range *refs {
					if ref == instr {
						continue operands
					}
				}
				s.errorf("operand %d of %s (%s) does not refer to us", i, instr, val)
			} else {
				s.errorf("operand %d of %s (%s) has no referrers", i, instr, val)
			}
		}
	}
}
// checkReferrerList verifies that v has a referrer list and that every
// entry in it is an instruction belonging to the function being checked
// (i.e. present in s.instrs).
func (s *sanity) checkReferrerList(v Value) {
	refs := v.Referrers()
	if refs == nil {
		s.errorf("%s has missing referrer list", v.Name())
		return
	}
	for i, ref := range *refs {
		if _, ok := s.instrs[ref]; ok {
			continue
		}
		if val, ok := ref.(Value); ok {
			s.errorf("%s.Referrers()[%d] = %s = %s is not an instruction belonging to this function", v.Name(), i, val.Name(), val)
		} else {
			s.errorf("%s.Referrers()[%d] = %s is not an instruction belonging to this function", v.Name(), i, ref)
		}
	}
}
// checkFunction checks invariants of function fn (Prog/Pkg presence,
// Synthetic/source consistency, Locals/Params/FreeVars parentage and
// typing, and each basic block via checkBlock). It reports whether the
// function passed all checks so far (i.e. no errors were recorded).
func (s *sanity) checkFunction(fn *Function) bool {
	// TODO(adonovan): check Function invariants:
	// - check params match signature
	// - check transient fields are nil
	// - warn if any fn.Locals do not appear among block instructions.
	s.fn = fn
	if fn.Prog == nil {
		s.errorf("nil Prog")
	}
	_ = fn.String()            // must not crash
	_ = fn.RelString(fn.pkg()) // must not crash
	// All functions have a package, except delegates (which are
	// shared across packages, or duplicated as weak symbols in a
	// separate-compilation model), and error.Error.
	if fn.Pkg == nil {
		switch fn.Synthetic {
		case SyntheticWrapper, SyntheticBound, SyntheticThunk, SyntheticGeneric:
			// Synthetic delegates legitimately have no package.
		default:
			if !strings.HasSuffix(fn.name, "Error") {
				s.errorf("nil Pkg")
			}
		}
	}
	// A function comes from source iff it is non-synthetic; both flags must agree.
	if src, syn := fn.Synthetic == 0, fn.source != nil; src != syn {
		s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn)
	}
	for i, l := range fn.Locals {
		if l.Parent() != fn {
			s.errorf("Local %s at index %d has wrong parent", l.Name(), i)
		}
		if l.Heap {
			s.errorf("Local %s at index %d has Heap flag set", l.Name(), i)
		}
	}
	// Build the set of valid referrers.
	s.instrs = make(map[Instruction]struct{})
	for _, b := range fn.Blocks {
		for _, instr := range b.Instrs {
			s.instrs[instr] = struct{}{}
		}
	}
	for i, p := range fn.Params {
		if p.Parent() != fn {
			s.errorf("Param %s at index %d has wrong parent", p.Name(), i)
		}
		// Check common suffix of Signature and Params match type.
		if sig := fn.Signature; sig != nil {
			j := i - len(fn.Params) + sig.Params().Len() // index within sig.Params
			if j < 0 {
				continue
			}
			if !types.Identical(p.Type(), sig.Params().At(j).Type()) {
				s.errorf("Param %s at index %d has wrong type (%s, versus %s in Signature)", p.Name(), i, p.Type(), sig.Params().At(j).Type())
			}
		}
		s.checkReferrerList(p)
	}
	for i, fv := range fn.FreeVars {
		if fv.Parent() != fn {
			s.errorf("FreeVar %s at index %d has wrong parent", fv.Name(), i)
		}
		s.checkReferrerList(fv)
	}
	if fn.Blocks != nil && len(fn.Blocks) == 0 {
		// Function _had_ blocks (so it's not external) but
		// they were "optimized" away, even the entry block.
		s.errorf("Blocks slice is non-nil but empty")
	}
	for i, b := range fn.Blocks {
		if b == nil {
			s.warnf("nil *BasicBlock at f.Blocks[%d]", i)
			continue
		}
		s.checkBlock(b, i)
	}
	s.block = nil
	for i, anon := range fn.AnonFuncs {
		if anon.Parent() != fn {
			s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent())
		}
	}
	s.fn = nil
	return !s.insane
}
// sanityCheckPackage checks invariants of packages upon creation.
// It does not require that the package is built.
// Unlike sanityCheck (for functions), it just panics at the first error.
func sanityCheckPackage(pkg *Package) {
	if pkg.Pkg == nil {
		panic(fmt.Sprintf("Package %s has no Object", pkg))
	}
	_ = pkg.String() // must not crash
	for name, mem := range pkg.Members {
		if name != mem.Name() {
			panic(fmt.Sprintf("%s: %T.Name() = %s, want %s",
				pkg.Pkg.Path(), mem, mem.Name(), name))
		}
		obj := mem.Object()
		if obj == nil {
			// This check is sound because fields
			// {Global,Function}.object have type
			// types.Object. (If they were declared as
			// *types.{Var,Func}, we'd have a non-empty
			// interface containing a nil pointer.)
			continue // not all members have typechecker objects
		}
		if obj.Name() == name {
			continue
		}
		if obj.Name() == "init" && strings.HasPrefix(mem.Name(), "init#") {
			// Ok. The name of a declared init function varies between
			// its types.Func ("init") and its ir.Function ("init#%d").
			continue
		}
		panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s",
			pkg.Pkg.Path(), mem, obj.Name(), name))
	}
}

View File

@@ -0,0 +1,273 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
// This file defines utilities for working with source positions
// or source-level named entities ("objects").
// TODO(adonovan): test that {Value,Instruction}.Pos() positions match
// the originating syntax, as specified.
import (
"go/ast"
"go/token"
"go/types"
"golang.org/x/exp/typeparams"
)
// EnclosingFunction returns the function that contains the syntax
// node denoted by path.
//
// Syntax associated with package-level variable specifications is
// enclosed by the package's init() function.
//
// Returns nil if not found; reasons might include:
//    - the node is not enclosed by any function.
//    - the node is within an anonymous function (FuncLit) and
//      its IR function has not been created yet
//      (pkg.Build() has not yet been called).
//
func EnclosingFunction(pkg *Package, path []ast.Node) *Function {
	// Start with package-level function...
	fn := findEnclosingPackageLevelFunction(pkg, path)
	if fn == nil {
		return nil // not in any function
	}
	// ...then walk the path from outermost node (the *ast.File, at
	// the end of path) towards the innermost, descending into each
	// anonymous function we pass through.
outer:
	for i := len(path) - 1; i >= 0; i-- {
		lit, ok := path[i].(*ast.FuncLit)
		if !ok {
			continue
		}
		for _, anon := range fn.AnonFuncs {
			if anon.Pos() == lit.Type.Func {
				fn = anon
				continue outer
			}
		}
		// IR function not found:
		// - package not yet built, or maybe
		// - builder skipped FuncLit in dead block
		//   (in principle; but currently the Builder
		//   generates even dead FuncLits).
		return nil
	}
	return fn
}
// HasEnclosingFunction reports whether the AST node denoted by path
// is contained within the declaration of some function or
// package-level variable.
//
// Unlike EnclosingFunction, the behaviour of this function does not
// depend on whether IR code for pkg has been built, so it can be
// used to quickly reject check inputs that will cause
// EnclosingFunction to fail, prior to IR building.
//
func HasEnclosingFunction(pkg *Package, path []ast.Node) bool {
	fn := findEnclosingPackageLevelFunction(pkg, path)
	return fn != nil
}
// findEnclosingPackageLevelFunction returns the Function
// corresponding to the package-level function enclosing path.
//
func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function {
	n := len(path)
	if n < 2 { // need at least [... {Gen,Func}Decl File]
		return nil
	}
	switch decl := path[n-2].(type) {
	case *ast.GenDecl:
		if decl.Tok == token.VAR && n >= 3 {
			// Package-level 'var' initializer.
			return pkg.init
		}
	case *ast.FuncDecl:
		// Declared function/method.
		if fn := findNamedFunc(pkg, decl.Pos()); fn != nil {
			return fn
		}
		if decl.Recv == nil && decl.Name.Name == "init" {
			// Hack: return non-nil when IR is not yet
			// built so that HasEnclosingFunction works.
			return pkg.init
		}
	}
	return nil // not in any function
}
// findNamedFunc returns the named function whose FuncDecl.Ident is at
// position pos, or nil if none exists in pkg.
//
func findNamedFunc(pkg *Package, pos token.Pos) *Function {
	for _, fn := range pkg.Functions {
		if fn.Pos() != pos {
			continue
		}
		return fn
	}
	return nil
}
// ValueForExpr returns the IR Value that corresponds to non-constant
// expression e.
//
// It returns nil if no value was found, e.g.
//    - the expression is not lexically contained within f;
//    - f was not built with debug information; or
//    - e is a constant expression. (For efficiency, no debug
//      information is stored for constants. Use
//      go/types.Info.Types[e].Value instead.)
//    - e is a reference to nil or a built-in function.
//    - the value was optimised away.
//
// If e is an addressable expression used in an lvalue context,
// value is the address denoted by e, and isAddr is true.
//
// The types of e (or &e, if isAddr) and the result are equal
// (modulo "untyped" bools resulting from comparisons).
//
// (Tip: to find the ir.Value given a source position, use
// astutil.PathEnclosingInterval to locate the ast.Node, then
// EnclosingFunction to locate the Function, then ValueForExpr to find
// the ir.Value.)
//
func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) {
	if !f.debugInfo() { // (opt)
		return
	}
	e = unparen(e)
	for _, b := range f.Blocks {
		for _, instr := range b.Instrs {
			ref, ok := instr.(*DebugRef)
			if !ok || ref.Expr != e {
				continue
			}
			return ref.X, ref.IsAddr
		}
	}
	return
}
// --- Lookup functions for source-level named entities (types.Objects) ---
// Package returns the IR Package corresponding to the specified
// type-checker package object.
// It returns nil if no such IR package has been created.
//
func (prog *Program) Package(obj *types.Package) *Package {
	pkg := prog.packages[obj]
	return pkg
}
// packageLevelValue returns the package-level value corresponding to
// the specified named object, which may be a package-level const
// (*Const), var (*Global) or func (*Function) of some package in
// prog. It returns nil if the object is not found.
//
func (prog *Program) packageLevelValue(obj types.Object) Value {
	pkg, ok := prog.packages[obj.Pkg()]
	if !ok {
		return nil
	}
	return pkg.values[obj]
}
// FuncValue returns the concrete Function denoted by the source-level
// named function obj, or nil if obj denotes an interface method.
//
// TODO(adonovan): check the invariant that obj.Type() matches the
// result's Signature, both in the params/results and in the receiver.
//
func (prog *Program) FuncValue(obj *types.Func) *Function {
	// Canonicalize instantiated methods to their generic origin.
	canon := typeparams.OriginMethod(obj)
	if fn, ok := prog.packageLevelValue(canon).(*Function); ok {
		return fn
	}
	return nil
}
// ConstValue returns the IR Value denoted by the source-level named
// constant obj.
//
func (prog *Program) ConstValue(obj *types.Const) *Const {
	// TODO(adonovan): opt: share (don't reallocate)
	// Consts for const objects and constant ast.Exprs.
	if obj.Parent() != types.Universe {
		// Package-level named constant?
		if v := prog.packageLevelValue(obj); v != nil {
			return v.(*Const)
		}
	}
	// Universal constant ({true,false,nil}) or not found.
	return NewConst(obj.Val(), obj.Type())
}
// VarValue returns the IR Value that corresponds to a specific
// identifier denoting the source-level named variable obj.
//
// VarValue returns nil if a local variable was not found, perhaps
// because its package was not built, the debug information was not
// requested during IR construction, or the value was optimized away.
//
// ref is the path to an ast.Ident (e.g. from PathEnclosingInterval),
// and that ident must resolve to obj.
//
// pkg is the package enclosing the reference. (A reference to a var
// always occurs within a function, so we need to know where to find it.)
//
// If the identifier is a field selector and its base expression is
// non-addressable, then VarValue returns the value of that field.
// For example:
//    func f() struct {x int}
//    f().x  // VarValue(x) returns a *Field instruction of type int
//
// All other identifiers denote addressable locations (variables).
// For them, VarValue may return either the variable's address or its
// value, even when the expression is evaluated only for its value; the
// situation is reported by isAddr, the second component of the result.
//
// If !isAddr, the returned value is the one associated with the
// specific identifier. For example,
//       var x int    // VarValue(x) returns Const 0 here
//       x = 1        // VarValue(x) returns Const 1 here
//
// It is not specified whether the value or the address is returned in
// any particular case, as it may depend upon optimizations performed
// during IR code generation, such as registerization, constant
// folding, avoidance of materialization of subexpressions, etc.
//
func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) {
	// All references to a var are local to some function, possibly init.
	fn := EnclosingFunction(pkg, ref)
	if fn == nil {
		return // e.g. def of struct field; IR not built?
	}
	id := ref[0].(*ast.Ident)
	// Defining ident of a parameter?
	if id.Pos() == obj.Pos() {
		for _, param := range fn.Params {
			if param.Object() == obj {
				return param, false
			}
		}
	}
	// Other ident? Look for a DebugRef at the ident's position.
	for _, b := range fn.Blocks {
		for _, instr := range b.Instrs {
			dr, ok := instr.(*DebugRef)
			if ok && dr.Pos() == id.Pos() {
				return dr.X, dr.IsAddr
			}
		}
	}
	// Defining ident of package-level var?
	if v := prog.packageLevelValue(obj); v != nil {
		return v.(*Global), true
	}
	return // e.g. debug info not requested, or var optimized away
}

2054
vendor/honnef.co/go/tools/go/ir/ssa.go vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,3 @@
# ssa/... is mostly imported from upstream and we don't want to
# deviate from it too much, hence disabling SA1019
checks = ["inherit", "-SA1019"]

149
vendor/honnef.co/go/tools/go/ir/util.go vendored Normal file
View File

@@ -0,0 +1,149 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
// This file defines a number of miscellaneous utility functions.
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"io"
"os"
"honnef.co/go/tools/go/ast/astutil"
"honnef.co/go/tools/go/types/typeutil"
"golang.org/x/exp/typeparams"
)
//// AST utilities
// unparen strips any enclosing parentheses from e.
func unparen(e ast.Expr) ast.Expr {
	return astutil.Unparen(e)
}
// isBlankIdent returns true iff e is an Ident with name "_".
// They have no associated types.Object, and thus no type.
//
func isBlankIdent(e ast.Expr) bool {
id, ok := e.(*ast.Ident)
return ok && id.Name == "_"
}
//// Type utilities. Some of these belong in go/types.
// isPointer reports whether typ's underlying type is a pointer, or,
// for type parameters, whether its core type is a pointer.
func isPointer(typ types.Type) bool {
	t := typ.Underlying()
	if ctyp := typeutil.CoreType(typ); ctyp != nil {
		t = ctyp
	}
	_, ok := t.(*types.Pointer)
	return ok
}
func isInterface(T types.Type) bool { return types.IsInterface(T) }
// deref returns a pointer's element type; otherwise it returns typ
// unchanged. Type parameters are examined via their core type.
func deref(typ types.Type) types.Type {
	t := typ
	if tp, ok := t.(*typeparams.TypeParam); ok {
		if core := typeutil.CoreType(tp); core != nil {
			t = core
		}
	}
	if ptr, ok := t.Underlying().(*types.Pointer); ok {
		return ptr.Elem()
	}
	return typ
}
// recvType returns the receiver type of method obj.
func recvType(obj *types.Func) types.Type {
return obj.Type().(*types.Signature).Recv().Type()
}
// logStack prints the formatted "start" message to stderr and
// returns a closure that prints the corresponding "end" message.
// Call using 'defer logStack(...)()' to show builder stack on panic.
// Don't forget trailing parens!
//
func logStack(format string, args ...interface{}) func() {
	msg := fmt.Sprintf(format, args...)
	io.WriteString(os.Stderr, msg+"\n")
	return func() {
		io.WriteString(os.Stderr, msg+" end\n")
	}
}
// newVar creates a 'var' for use in a types.Tuple.
func newVar(name string, typ types.Type) *types.Var {
return types.NewParam(token.NoPos, nil, name, typ)
}
// anonVar creates an anonymous (unnamed) 'var' for use in a types.Tuple.
func anonVar(typ types.Type) *types.Var {
	return types.NewParam(token.NoPos, nil, "", typ)
}
// lenResults is the shared (int) result tuple for specializations of
// the len builtin produced by makeLen.
var lenResults = types.NewTuple(anonVar(tInt))
// makeLen returns the len builtin specialized to type func(T)int.
func makeLen(T types.Type) *Builtin {
	params := types.NewTuple(anonVar(T))
	b := &Builtin{
		name: "len",
		sig:  types.NewSignature(nil, params, lenResults, false),
	}
	return b
}
// StackMap is a stack of scopes, each a map from Value to Value.
// Lookups search scopes innermost-first; writes go to the innermost
// scope.
type StackMap struct {
	m []map[Value]Value
}

// Push adds a new, empty innermost scope.
func (m *StackMap) Push() {
	m.m = append(m.m, map[Value]Value{})
}

// Pop discards the innermost scope.
func (m *StackMap) Pop() {
	n := len(m.m)
	m.m = m.m[:n-1]
}

// Get returns the value bound to key in the innermost scope that
// binds it, and reports whether such a binding exists.
func (m *StackMap) Get(key Value) (Value, bool) {
	for i := len(m.m) - 1; i >= 0; i-- {
		if v, found := m.m[i][key]; found {
			return v, true
		}
	}
	return nil, false
}

// Set binds k to v in the innermost scope.
func (m *StackMap) Set(k Value, v Value) {
	m.m[len(m.m)-1][k] = v
}
// Unwrap recursively unwraps Sigma and Copy nodes, returning the
// first underlying value that is neither.
func Unwrap(v Value) Value {
	for {
		if s, ok := v.(*Sigma); ok {
			v = s.X
			continue
		}
		if c, ok := v.(*Copy); ok {
			v = c.X
			continue
		}
		return v
	}
}
// assert panics if x is false; it is used to check internal invariants.
func assert(x bool) {
	if x {
		return
	}
	panic("failed assertion")
}

View File

@@ -0,0 +1,387 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
// This file defines synthesis of Functions that delegate to declared
// methods; they come in three kinds:
//
// (1) wrappers: methods that wrap declared methods, performing
// implicit pointer indirections and embedded field selections.
//
// (2) thunks: funcs that wrap declared methods. Like wrappers,
// thunks perform indirections and field selections. The thunk's
// first parameter is used as the receiver for the method call.
//
// (3) bounds: funcs that wrap declared methods. The bound's sole
// free variable, supplied by a closure, is used as the receiver
// for the method call. No indirections or field selections are
// performed since they can be done before the call.
import (
"fmt"
"go/types"
"golang.org/x/exp/typeparams"
)
// -- wrappers -----------------------------------------------------------
// makeWrapper returns a synthetic method that delegates to the
// declared method denoted by meth.Obj(), first performing any
// necessary pointer indirections or field selections implied by meth.
//
// The resulting method's receiver type is meth.Recv().
//
// This function is versatile but quite subtle! Consider the
// following axes of variation when making changes:
// - optional receiver indirection
// - optional implicit field selections
// - meth.Obj() may denote a concrete or an interface method
// - the result may be a thunk or a wrapper.
//
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
//
// makeWrapper builds a synthetic function that delegates to the
// declared method sel.Obj(): a thunk when sel is a MethodExpr
// (receiver becomes the first regular parameter), otherwise a wrapper
// (receiver kept). It performs the pointer indirections and implicit
// field selections implied by sel.Index() before the tail call.
func makeWrapper(prog *Program, sel *types.Selection) *Function {
	obj := sel.Obj().(*types.Func)       // the declared function
	sig := sel.Type().(*types.Signature) // type of this wrapper
	var recv *types.Var // wrapper's receiver or thunk's params[0]
	name := obj.Name()
	var description Synthetic
	var start int // first regular param
	if sel.Kind() == types.MethodExpr {
		name += "$thunk"
		description = SyntheticThunk
		recv = sig.Params().At(0)
		start = 1
	} else {
		description = SyntheticWrapper
		recv = sig.Recv()
	}
	if prog.mode&LogSource != 0 {
		defer logStack("make %s to (%s)", description, recv.Type())()
	}
	fn := &Function{
		name:         name,
		method:       sel,
		object:       obj,
		Signature:    sig,
		Synthetic:    description,
		Prog:         prog,
		functionBody: new(functionBody),
	}
	fn.initHTML(prog.PrintFunc)
	fn.startBody()
	fn.addSpilledParam(recv, nil)
	createParams(fn, start)
	indices := sel.Index()
	var v Value = fn.Locals[0] // spilled receiver
	if isPointer(sel.Recv()) {
		v = emitLoad(fn, v, nil)
		// For simple indirection wrappers, perform an informative nil-check:
		// "value method (T).f called using nil *T pointer"
		if len(indices) == 1 && !isPointer(recvType(obj)) {
			var c Call
			// ir:wrapnilchk is a synthetic builtin of type
			// func(recv, recvType, methodName) recv.
			c.Call.Value = &Builtin{
				name: "ir:wrapnilchk",
				sig: types.NewSignature(nil,
					types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)),
					types.NewTuple(anonVar(sel.Recv())), false),
			}
			c.Call.Args = []Value{
				v,
				emitConst(fn, stringConst(deref(sel.Recv()).String())),
				emitConst(fn, stringConst(sel.Obj().Name())),
			}
			c.setType(v.Type())
			v = fn.emit(&c, nil)
		}
	}
	// Invariant: v is a pointer, either
	//    value of *A receiver param, or
	//    address of A spilled receiver.
	// We use pointer arithmetic (FieldAddr possibly followed by
	// Load) in preference to value extraction (Field possibly
	// preceded by Load).
	v = emitImplicitSelections(fn, v, indices[:len(indices)-1], nil)
	// Invariant: v is a pointer, either
	//    value of implicit *C field, or
	//    address of implicit C field.
	var c Call
	if r := recvType(obj); !isInterface(r) { // concrete method
		if !isPointer(r) {
			v = emitLoad(fn, v, nil)
		}
		c.Call.Value = prog.declaredFunc(obj)
		c.Call.Args = append(c.Call.Args, v)
	} else {
		// Interface method: dynamic dispatch on the loaded receiver.
		c.Call.Method = obj
		c.Call.Value = emitLoad(fn, v, nil)
	}
	// Forward the remaining regular parameters unchanged.
	for _, arg := range fn.Params[1:] {
		c.Call.Args = append(c.Call.Args, arg)
	}
	emitTailCall(fn, &c, nil)
	fn.finishBody()
	return fn
}
// createParams creates parameters for wrapper method fn based on its
// Signature.Params, which do not include the receiver.
// start is the index of the first regular parameter to use.
//
func createParams(fn *Function, start int) {
	params := fn.Signature.Params()
	n := params.Len()
	for i := start; i < n; i++ {
		fn.addParamObj(params.At(i), nil)
	}
}
// -- bounds -----------------------------------------------------------
// makeBound returns a bound method wrapper (or "bound"), a synthetic
// function that delegates to a concrete or interface method denoted
// by obj. The resulting function has no receiver, but has one free
// variable which will be used as the method's receiver in the
// tail-call.
//
// Use MakeClosure with such a wrapper to construct a bound method
// closure. e.g.:
//
// type T int or: type T interface { meth() }
// func (t T) meth()
// var t T
// f := t.meth
// f() // calls t.meth()
//
// f is a closure of a synthetic wrapper defined as if by:
//
// f := func() { return t.meth() }
//
// Unlike makeWrapper, makeBound need perform no indirection or field
// selections because that can be done before the closure is
// constructed.
//
// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu)
//
// makeBound returns the bound-method wrapper for obj, building and
// caching it in prog.bounds on first use. The wrapper has no receiver;
// its single free variable supplies the receiver at the tail call.
// Acquires prog.methodsMu for the duration.
func makeBound(prog *Program, obj *types.Func) *Function {
	prog.methodsMu.Lock()
	defer prog.methodsMu.Unlock()
	fn, ok := prog.bounds[obj]
	if !ok {
		// Cache miss: synthesize the wrapper.
		if prog.mode&LogSource != 0 {
			defer logStack("%s", SyntheticBound)()
		}
		fn = &Function{
			name:         obj.Name() + "$bound",
			object:       obj,
			Signature:    changeRecv(obj.Type().(*types.Signature), nil), // drop receiver
			Synthetic:    SyntheticBound,
			Prog:         prog,
			functionBody: new(functionBody),
		}
		fn.initHTML(prog.PrintFunc)
		// The sole free variable carries the receiver.
		fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn}
		fn.FreeVars = []*FreeVar{fv}
		fn.startBody()
		createParams(fn, 0)
		var c Call
		if !isInterface(recvType(obj)) { // concrete
			c.Call.Value = prog.declaredFunc(obj)
			c.Call.Args = []Value{fv}
		} else {
			// Interface method: dynamic dispatch on the free variable.
			c.Call.Value = fv
			c.Call.Method = obj
		}
		for _, arg := range fn.Params {
			c.Call.Args = append(c.Call.Args, arg)
		}
		emitTailCall(fn, &c, nil)
		fn.finishBody()
		prog.bounds[obj] = fn
	}
	return fn
}
// -- thunks -----------------------------------------------------------
// makeThunk returns a thunk, a synthetic function that delegates to a
// concrete or interface method denoted by sel.Obj(). The resulting
// function has no receiver, but has an additional (first) regular
// parameter.
//
// Precondition: sel.Kind() == types.MethodExpr.
//
// type T int or: type T interface { meth() }
// func (t T) meth()
// f := T.meth
// var t T
// f(t) // calls t.meth()
//
// f is a synthetic wrapper defined as if by:
//
// f := func(t T) { return t.meth() }
//
// TODO(adonovan): opt: currently the stub is created even when used
// directly in a function call: C.f(i, 0). This is less efficient
// than inlining the stub.
//
// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu)
//
// makeThunk returns the thunk for method expression sel, building and
// caching it in prog.thunks on first use. The receiver type in the
// cache key is canonicalized via prog.canon so structurally identical
// selections share one thunk. Acquires prog.methodsMu.
// Precondition: sel.Kind() == types.MethodExpr.
func makeThunk(prog *Program, sel *types.Selection) *Function {
	if sel.Kind() != types.MethodExpr {
		panic(sel)
	}
	key := selectionKey{
		kind:     sel.Kind(),
		recv:     sel.Recv(),
		obj:      sel.Obj(),
		index:    fmt.Sprint(sel.Index()),
		indirect: sel.Indirect(),
	}
	prog.methodsMu.Lock()
	defer prog.methodsMu.Unlock()
	// Canonicalize key.recv to avoid constructing duplicate thunks.
	canonRecv, ok := prog.canon.At(key.recv).(types.Type)
	if !ok {
		// First time we see this receiver type: it becomes the canonical one.
		canonRecv = key.recv
		prog.canon.Set(key.recv, canonRecv)
	}
	key.recv = canonRecv
	fn, ok := prog.thunks[key]
	if !ok {
		// Cache miss: synthesize the thunk via makeWrapper.
		fn = makeWrapper(prog, sel)
		if fn.Signature.Recv() != nil {
			panic(fn) // unexpected receiver
		}
		prog.thunks[key] = fn
	}
	return fn
}
func changeRecv(s *types.Signature, recv *types.Var) *types.Signature {
return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic())
}
// selectionKey is like types.Selection but a usable map key.
type selectionKey struct {
	kind     types.SelectionKind
	recv     types.Type // canonicalized via Program.canon
	obj      types.Object
	index    string // fmt.Sprint of the selection's index path
	indirect bool
}
// makeInstance creates a wrapper function with signature sig that calls the generic function fn.
// If targs is not nil, fn is a function and targs describes the concrete type arguments.
// If targs is nil, fn is a method and the type arguments are derived from the receiver.
// Wrappers are cached in fn.generics, keyed by targs.
func makeInstance(prog *Program, fn *Function, sig *types.Signature, targs *typeparams.TypeList) *Function {
	if sig.Recv() != nil {
		assert(targs == nil)
		// Methods don't have their own type parameters, but the receiver does
		targs = typeparams.NamedTypeArgs(deref(sig.Recv().Type()).(*types.Named))
	} else {
		assert(targs != nil)
	}
	// Reuse a previously built wrapper for the same type arguments.
	wrapper := fn.generics.At(targs)
	if wrapper != nil {
		return wrapper
	}
	var name string
	if sig.Recv() != nil {
		name = fn.name
	} else {
		name = fmt.Sprintf("%s$generic#%d", fn.name, fn.generics.Len())
	}
	w := &Function{
		name:         name,
		object:       fn.object,
		Signature:    sig,
		Synthetic:    SyntheticGeneric,
		Prog:         prog,
		functionBody: new(functionBody),
	}
	w.initHTML(prog.PrintFunc)
	w.startBody()
	if sig.Recv() != nil {
		w.addParamObj(sig.Recv(), nil)
	}
	createParams(w, 0)
	var c Call
	c.Call.Value = fn
	tresults := fn.Signature.Results()
	if tresults.Len() == 1 {
		c.typ = tresults.At(0).Type()
	} else {
		c.typ = tresults
	}
	// changeType adapts v to typ via a ChangeType instruction when the
	// types are not already identical.
	changeType := func(v Value, typ types.Type) Value {
		if types.Identical(v.Type(), typ) {
			return v
		}
		var c ChangeType
		c.X = v
		c.typ = typ
		return w.emit(&c, nil)
	}
	// Forward the wrapper's parameters (receiver first, if any) to the
	// generic callee, adapting each to the callee's parameter type.
	for i, arg := range w.Params {
		if sig.Recv() != nil {
			if i == 0 {
				c.Call.Args = append(c.Call.Args, changeType(w.Params[0], fn.Signature.Recv().Type()))
			} else {
				c.Call.Args = append(c.Call.Args, changeType(arg, fn.Signature.Params().At(i-1).Type()))
			}
		} else {
			c.Call.Args = append(c.Call.Args, changeType(arg, fn.Signature.Params().At(i).Type()))
		}
	}
	for i := 0; i < targs.Len(); i++ {
		arg := targs.At(i)
		c.Call.TypeArgs = append(c.Call.TypeArgs, arg)
	}
	results := w.emit(&c, nil)
	var ret Return
	// Adapt the callee's results back to the wrapper's result types.
	switch tresults.Len() {
	case 0:
	case 1:
		ret.Results = []Value{changeType(results, sig.Results().At(0).Type())}
	default:
		for i := 0; i < tresults.Len(); i++ {
			v := emitExtract(w, results, i, nil)
			ret.Results = append(ret.Results, changeType(v, sig.Results().At(i).Type()))
		}
	}
	w.Exit = w.newBasicBlock("exit")
	emitJump(w, w.Exit, nil)
	w.currentBlock = w.Exit
	w.emit(&ret, nil)
	w.currentBlock = nil
	w.finishBody()
	fn.generics.Set(targs, w)
	return w
}

View File

@@ -0,0 +1,5 @@
package ir
// NewJump returns a new Jump instruction whose block is parent.
func NewJump(parent *BasicBlock) *Jump {
	j := &Jump{anInstruction{block: parent}, ""}
	return j
}

View File

@@ -0,0 +1,84 @@
package loader
import (
"fmt"
"runtime"
"sort"
"strings"
"honnef.co/go/tools/go/buildid"
"honnef.co/go/tools/lintcmd/cache"
)
// computeHash computes a package's hash. The hash is based on all Go
// files that make up the package, as well as the hashes of imported
// packages.
func computeHash(c *cache.Cache, pkg *PackageSpec) (cache.ActionID, error) {
	key := c.NewHash("package " + pkg.PkgPath)
	fmt.Fprintf(key, "goos %s goarch %s\n", runtime.GOOS, runtime.GOARCH)
	fmt.Fprintf(key, "import %q\n", pkg.PkgPath)

	// Compute the hashes of all files making up the package. As an
	// optimization, we use the build ID that Go already computed for
	// us, because it is virtually identical to hashing all
	// CompiledGoFiles.
	usedBuildID := false
	if pkg.ExportFile != "" {
		if id, err := getBuildid(pkg.ExportFile); err == nil {
			if idx := strings.IndexRune(id, '/'); idx > -1 {
				fmt.Fprintf(key, "files %s\n", id[:idx])
				usedBuildID = true
			}
		}
	}
	if !usedBuildID {
		// Fall back to hashing each compiled Go file individually.
		for _, f := range pkg.CompiledGoFiles {
			h, err := cache.FileHash(f)
			if err != nil {
				return cache.ActionID{}, err
			}
			fmt.Fprintf(key, "file %s %x\n", f, h)
		}
	}

	// Mix in the imports in a deterministic (sorted) order.
	imps := make([]*PackageSpec, 0, len(pkg.Imports))
	for _, v := range pkg.Imports {
		imps = append(imps, v)
	}
	sort.Slice(imps, func(i, j int) bool {
		return imps[i].PkgPath < imps[j].PkgPath
	})
	for _, dep := range imps {
		if dep.ExportFile == "" {
			fmt.Fprintf(key, "import %s \n", dep.PkgPath)
			continue
		}
		if id, err := getBuildid(dep.ExportFile); err == nil {
			fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, id)
			continue
		}
		// No build ID available; hash the export file itself.
		fh, err := cache.FileHash(dep.ExportFile)
		if err != nil {
			return cache.ActionID{}, err
		}
		fmt.Fprintf(key, "import %s %x\n", dep.PkgPath, fh)
	}
	return key.Sum(), nil
}
// buildidCache memoizes getBuildid results, keyed by file path.
var buildidCache = map[string]string{}

// getBuildid returns the build ID stored in the file f, caching the
// result for subsequent lookups of the same path.
func getBuildid(f string) (string, error) {
	if id, ok := buildidCache[f]; ok {
		return id, nil
	}
	id, err := buildid.ReadFile(f)
	if err != nil {
		return "", err
	}
	buildidCache[f] = id
	return id, nil
}

View File

@@ -0,0 +1,348 @@
package loader
import (
"errors"
"fmt"
"go/ast"
"go/parser"
"go/scanner"
"go/token"
"go/types"
"os"
"time"
"honnef.co/go/tools/config"
"honnef.co/go/tools/lintcmd/cache"
"golang.org/x/exp/typeparams"
"golang.org/x/tools/go/gcexportdata"
"golang.org/x/tools/go/packages"
)
// MaxFileSize is the maximum size, in bytes, of a Go source file that
// loadFromSource is willing to parse.
const MaxFileSize = 50 * 1024 * 1024 // 50 MB

// errMaxFileSize is a sentinel returned when a package contains a file
// larger than MaxFileSize; Load reacts to it by falling back to export
// data.
var errMaxFileSize = errors.New("file exceeds max file size")

// PackageSpec describes a package and its place in the import graph,
// before any syntax or type information has been loaded.
type PackageSpec struct {
	ID      string
	Name    string
	PkgPath string
	// Errors that occurred while building the import graph. These will
	// primarily be parse errors or failure to resolve imports, but
	// may also be other errors.
	Errors          []packages.Error
	GoFiles         []string
	CompiledGoFiles []string
	OtherFiles      []string
	ExportFile      string
	Imports         map[string]*PackageSpec
	TypesSizes      types.Sizes
	// Hash is this package's cache key, as computed by computeHash.
	Hash   cache.ActionID
	Module *packages.Module
	// Config is the per-directory configuration loaded via
	// config.Load (or config.DefaultConfig if none applies).
	Config config.Config
}
// String implements fmt.Stringer, identifying the spec by its unique
// package ID.
func (spec *PackageSpec) String() string {
	return spec.ID
}
// Package is a fully loaded package: its PackageSpec plus syntax trees
// and type information.
type Package struct {
	*PackageSpec
	// Errors that occurred while loading the package. These will
	// primarily be parse or type errors, but may also be lower-level
	// failures such as file-system ones.
	Errors    []packages.Error
	Types     *types.Package
	Fset      *token.FileSet
	Syntax    []*ast.File
	TypesInfo *types.Info
}
// Graph resolves patterns and returns packages with all the
// information required to later load type information, and optionally
// syntax trees.
//
// The provided config can set any setting with the exception of Mode.
func Graph(c *cache.Cache, cfg *packages.Config, patterns ...string) ([]*PackageSpec, error) {
	var dcfg packages.Config
	if cfg != nil {
		dcfg = *cfg
	}
	// Mode is fixed: request exactly the metadata needed to compute
	// hashes and build the import graph — no syntax or types yet.
	dcfg.Mode = packages.NeedName |
		packages.NeedImports |
		packages.NeedDeps |
		packages.NeedExportsFile |
		packages.NeedFiles |
		packages.NeedCompiledGoFiles |
		packages.NeedTypesSizes |
		packages.NeedModule
	pkgs, err := packages.Load(&dcfg, patterns...)
	if err != nil {
		return nil, err
	}
	m := map[*packages.Package]*PackageSpec{}
	packages.Visit(pkgs, nil, func(pkg *packages.Package) {
		spec := &PackageSpec{
			ID:              pkg.ID,
			Name:            pkg.Name,
			PkgPath:         pkg.PkgPath,
			Errors:          pkg.Errors,
			GoFiles:         pkg.GoFiles,
			CompiledGoFiles: pkg.CompiledGoFiles,
			OtherFiles:      pkg.OtherFiles,
			ExportFile:      pkg.ExportFile,
			Imports:         map[string]*PackageSpec{},
			TypesSizes:      pkg.TypesSizes,
			Module:          pkg.Module,
		}
		// Visit's post function runs in postorder, so the specs of all
		// imports have already been created and registered in m.
		for path, imp := range pkg.Imports {
			spec.Imports[path] = m[imp]
		}
		if cdir := config.Dir(pkg.GoFiles); cdir != "" {
			cfg, err := config.Load(cdir)
			if err != nil {
				spec.Errors = append(spec.Errors, convertError(err)...)
			}
			spec.Config = cfg
		} else {
			spec.Config = config.DefaultConfig
		}
		// Use a locally scoped error instead of clobbering the outer
		// err from packages.Load: hash failures are recorded on the
		// spec, not returned from Graph.
		hash, err := computeHash(c, spec)
		if err != nil {
			spec.Errors = append(spec.Errors, convertError(err)...)
		}
		spec.Hash = hash
		m[pkg] = spec
	})
	out := make([]*PackageSpec, 0, len(pkgs))
	for _, pkg := range pkgs {
		if len(pkg.CompiledGoFiles) == 0 && len(pkg.Errors) == 0 && pkg.PkgPath != "unsafe" {
			// If a package consists only of test files, then
			// go/packages incorrectly(?) returns an empty package for
			// the non-test variant. Get rid of those packages. See
			// #646.
			//
			// Do not, however, skip packages that have errors. Those,
			// too, may have no files, but we want to print the
			// errors.
			continue
		}
		out = append(out, m[pkg])
	}
	return out, nil
}
// program carries state shared between loading a package and its
// dependencies: one FileSet for all files, and the already-loaded
// type-checker packages keyed by import path.
type program struct {
	fset     *token.FileSet
	packages map[string]*types.Package
}

// Stats records how long the individual phases of Load took.
type Stats struct {
	// Source is the time spent loading the target package from source.
	Source time.Duration
	// Export maps each dependency to the time spent loading its export
	// data.
	Export map[*PackageSpec]time.Duration
}
// Load loads the package described in spec. Imports will be loaded
// from export data, while the package itself will be loaded from
// source.
//
// An error will only be returned for system failures, such as failure
// to read export data from disk. Syntax and type errors, among
// others, will only populate the returned package's Errors field.
func Load(spec *PackageSpec) (*Package, Stats, error) {
	prog := &program{
		fset:     token.NewFileSet(),
		packages: map[string]*types.Package{},
	}
	stats := Stats{
		Export: map[*PackageSpec]time.Duration{},
	}
	for _, imp := range spec.Imports {
		// "unsafe" has no export data; the importer in loadFromSource
		// handles it specially via types.Unsafe.
		if imp.PkgPath == "unsafe" {
			continue
		}
		t := time.Now()
		_, err := prog.loadFromExport(imp)
		stats.Export[imp] = time.Since(t)
		if err != nil {
			return nil, stats, err
		}
	}
	t := time.Now()
	pkg, err := prog.loadFromSource(spec)
	// Packages with files larger than MaxFileSize fall back to export
	// data. Use errors.Is instead of == so the sentinel is still
	// recognized if it is ever returned wrapped.
	if errors.Is(err, errMaxFileSize) {
		pkg, err = prog.loadFromExport(spec)
	}
	stats.Source = time.Since(t)
	return pkg, stats, err
}
// loadFromExport loads a package from export data.
func (prog *program) loadFromExport(spec *PackageSpec) (*Package, error) {
	// log.Printf("Loading package %s from export", spec)
	if spec.ExportFile == "" {
		// Without an export file there is nothing to read from.
		return nil, fmt.Errorf("no export data for %q", spec.ID)
	}
	f, err := os.Open(spec.ExportFile)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		return nil, err
	}
	// Read materializes the package into the program-wide FileSet and
	// the shared map of already-loaded packages.
	tpkg, err := gcexportdata.Read(r, prog.fset, prog.packages, spec.PkgPath)
	if err != nil {
		return nil, err
	}
	// Note: Syntax and TypesInfo stay nil — export data carries no
	// syntax trees.
	pkg := &Package{
		PackageSpec: spec,
		Types:       tpkg,
		Fset:        prog.fset,
	}
	// runtime.SetFinalizer(pkg, func(pkg *Package) {
	// log.Println("Unloading package", pkg.PkgPath)
	// })
	return pkg, nil
}
// loadFromSource loads a package from source. All of its dependencies
// must have been loaded already.
func (prog *program) loadFromSource(spec *PackageSpec) (*Package, error) {
	if len(spec.Errors) > 0 {
		// Graph-level errors mean we cannot trust the spec; this is a
		// programmer error, not a runtime condition.
		panic("LoadFromSource called on package with errors")
	}
	pkg := &Package{
		PackageSpec: spec,
		Types:       types.NewPackage(spec.PkgPath, spec.Name),
		Syntax:      make([]*ast.File, len(spec.CompiledGoFiles)),
		Fset:        prog.fset,
		TypesInfo: &types.Info{
			Types:      make(map[ast.Expr]types.TypeAndValue),
			Defs:       make(map[*ast.Ident]types.Object),
			Uses:       make(map[*ast.Ident]types.Object),
			Implicits:  make(map[ast.Node]types.Object),
			Scopes:     make(map[ast.Node]*types.Scope),
			Selections: make(map[*ast.SelectorExpr]*types.Selection),
		},
	}
	typeparams.InitInstances(pkg.TypesInfo)
	// runtime.SetFinalizer(pkg, func(pkg *Package) {
	// log.Println("Unloading package", pkg.PkgPath)
	// })
	// OPT(dh): many packages have few files, much fewer than there
	// are CPU cores. Additionally, parsing each individual file is
	// very fast. A naive parallel implementation of this loop won't
	// be faster, and tends to be slower due to extra scheduling,
	// bookkeeping and potentially false sharing of cache lines.
	for i, file := range spec.CompiledGoFiles {
		f, err := os.Open(file)
		if err != nil {
			return nil, err
		}
		fi, err := f.Stat()
		if err != nil {
			return nil, err
		}
		// Refuse to parse oversized files; Load catches this sentinel
		// and falls back to export data.
		if fi.Size() >= MaxFileSize {
			return nil, errMaxFileSize
		}
		af, err := parser.ParseFile(prog.fset, file, f, parser.ParseComments)
		f.Close()
		if err != nil {
			// Parse errors are recorded on the package, not returned:
			// per Load's contract only system failures are errors.
			pkg.Errors = append(pkg.Errors, convertError(err)...)
			return pkg, nil
		}
		pkg.Syntax[i] = af
	}
	// importer resolves import paths against the spec's import graph
	// and the packages already materialized from export data.
	importer := func(path string) (*types.Package, error) {
		if path == "unsafe" {
			return types.Unsafe, nil
		}
		if path == "C" {
			// go/packages doesn't tell us that cgo preprocessing
			// failed. When we subsequently try to parse the package,
			// we'll encounter the raw C import.
			return nil, errors.New("cgo preprocessing failed")
		}
		ispecpkg := spec.Imports[path]
		if ispecpkg == nil {
			return nil, fmt.Errorf("trying to import %q in the context of %q returned nil PackageSpec", path, spec)
		}
		ipkg := prog.packages[ispecpkg.PkgPath]
		if ipkg == nil {
			return nil, fmt.Errorf("trying to import %q (%q) in the context of %q returned nil PackageSpec", ispecpkg.PkgPath, path, spec)
		}
		return ipkg, nil
	}
	tc := &types.Config{
		Importer: importerFunc(importer),
		// Collect type errors on the package instead of aborting the
		// type check at the first error.
		Error: func(err error) {
			pkg.Errors = append(pkg.Errors, convertError(err)...)
		},
	}
	types.NewChecker(tc, pkg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax)
	return pkg, nil
}
// convertError converts an arbitrary error into one or more
// packages.Error values, preserving position information where the
// concrete error type carries it.
func convertError(err error) []packages.Error {
	var errs []packages.Error
	// taken from go/packages
	switch err := err.(type) {
	case packages.Error:
		// from driver
		errs = append(errs, err)
	case *os.PathError:
		// from parser
		errs = append(errs, packages.Error{
			Pos:  err.Path + ":1",
			Msg:  err.Err.Error(),
			Kind: packages.ParseError,
		})
	case scanner.ErrorList:
		// from parser
		for _, err := range err {
			errs = append(errs, packages.Error{
				Pos:  err.Pos.String(),
				Msg:  err.Msg,
				Kind: packages.ParseError,
			})
		}
	case types.Error:
		// from type checker
		errs = append(errs, packages.Error{
			Pos:  err.Fset.Position(err.Pos).String(),
			Msg:  err.Msg,
			Kind: packages.TypeError,
		})
	case config.ParseError:
		// from the staticcheck configuration parser
		errs = append(errs, packages.Error{
			Pos:  fmt.Sprintf("%s:%d", err.Filename, err.Line),
			Msg:  fmt.Sprintf("%s (last key parsed: %q)", err.Message, err.LastKey),
			Kind: packages.ParseError,
		})
	default:
		// errors with no usable position information
		errs = append(errs, packages.Error{
			Pos:  "-",
			Msg:  err.Error(),
			Kind: packages.UnknownError,
		})
	}
	return errs
}
type importerFunc func(path string) (*types.Package, error)
func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }

View File

@@ -0,0 +1,18 @@
package typeutil
import (
"fmt"
"go/types"
)
type Iterator struct {
elem types.Type
}
func (t *Iterator) Underlying() types.Type { return t }
func (t *Iterator) String() string { return fmt.Sprintf("iterator(%s)", t.elem) }
func (t *Iterator) Elem() types.Type { return t.elem }
func NewIterator(elem types.Type) *Iterator {
return &Iterator{elem: elem}
}

View File

@@ -0,0 +1,106 @@
package typeutil
import (
"errors"
"go/types"
"golang.org/x/exp/typeparams"
)
// TypeSet represents the normalized type set of a type.
type TypeSet struct {
	// Terms is the list of normalized type terms; nil if the set is
	// empty or unconstrained.
	Terms []*typeparams.Term
	// empty reports whether the type set is provably empty.
	empty bool
}

// NewTypeSet computes the normalized type set of typ.
func NewTypeSet(typ types.Type) TypeSet {
	terms, err := typeparams.NormalTerms(typ)
	if err != nil {
		if errors.Is(err, typeparams.ErrEmptyTypeSet) {
			return TypeSet{nil, true}
		}
		// No redundant else after return (staticcheck-friendly).
		// We couldn't determine the type set. Assume it's all types.
		return TypeSet{nil, false}
	}
	return TypeSet{terms, false}
}
// CoreType returns the type set's core type, or nil if it has none.
// The function only looks at type terms and may thus return core types for some empty type sets, such as
// 'interface { map[int]string; foo() }'
func (ts TypeSet) CoreType() types.Type {
	if len(ts.Terms) == 0 {
		// Either the type set is empty, or it isn't constrained. Either way it doesn't have a core type.
		return nil
	}
	typ := ts.Terms[0].Type().Underlying()
	for _, term := range ts.Terms[1:] {
		ut := term.Type().Underlying()
		if types.Identical(typ, ut) {
			continue
		}
		// Distinct underlying types can only still yield a core type
		// in the channel case (per the spec's core-type rules).
		// NOTE(review): element types of the two channels are not
		// compared here — presumably guaranteed elsewhere; confirm.
		ch1, ok := typ.(*types.Chan)
		if !ok {
			return nil
		}
		ch2, ok := ut.(*types.Chan)
		if !ok {
			return nil
		}
		if ch1.Dir() == types.SendRecv {
			// typ is currently a bidirectional channel. The term's type is either also bidirectional, or
			// unidirectional. Use the term's type.
			typ = ut
		} else if ch1.Dir() != ch2.Dir() {
			// typ is not bidirectional and typ and term disagree about the direction
			return nil
		}
	}
	return typ
}
// CoreType is a convenience wrapper equivalent to
// NewTypeSet(typ).CoreType().
func CoreType(typ types.Type) types.Type {
	ts := NewTypeSet(typ)
	return ts.CoreType()
}
// All calls fn for each term in the type set and reports whether all invocations returned true.
// If the type set is empty or unconstrained, All immediately returns false.
func (ts TypeSet) All(fn func(*typeparams.Term) bool) bool {
	if len(ts.Terms) == 0 {
		return false
	}
	for i := range ts.Terms {
		if !fn(ts.Terms[i]) {
			return false
		}
	}
	return true
}
// Any calls fn for each term in the type set and reports whether any invocation returned true.
// It stops after the first call that returned true.
func (ts TypeSet) Any(fn func(*typeparams.Term) bool) bool {
	for i := range ts.Terms {
		if fn(ts.Terms[i]) {
			return true
		}
	}
	return false
}
// All is a convenience wrapper equivalent to NewTypeSet(typ).All(fn).
func All(typ types.Type, fn func(*typeparams.Term) bool) bool {
	ts := NewTypeSet(typ)
	return ts.All(fn)
}

// Any is a convenience wrapper equivalent to NewTypeSet(typ).Any(fn).
func Any(typ types.Type, fn func(*typeparams.Term) bool) bool {
	ts := NewTypeSet(typ)
	return ts.Any(fn)
}
// IsSlice reports whether the term's underlying type is a slice.
func IsSlice(term *typeparams.Term) bool {
	switch term.Type().Underlying().(type) {
	case *types.Slice:
		return true
	default:
		return false
	}
}

View File

@@ -0,0 +1,25 @@
package typeutil
import (
"go/ast"
"go/types"
_ "unsafe"
"golang.org/x/tools/go/types/typeutil"
)
// MethodSetCache is an alias for x/tools' typeutil.MethodSetCache.
type MethodSetCache = typeutil.MethodSetCache

// Map is an alias for x/tools' typeutil.Map, a map keyed by
// types.Type.
type Map = typeutil.Map

// Hasher is an alias for x/tools' typeutil.Hasher.
type Hasher = typeutil.Hasher

// Callee delegates to x/tools' typeutil.Callee.
func Callee(info *types.Info, call *ast.CallExpr) types.Object {
	return typeutil.Callee(info, call)
}

// IntuitiveMethodSet delegates to x/tools' typeutil.IntuitiveMethodSet.
func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
	return typeutil.IntuitiveMethodSet(T, msets)
}

// MakeHasher delegates to x/tools' typeutil.MakeHasher.
func MakeHasher() Hasher {
	return typeutil.MakeHasher()
}

View File

@@ -0,0 +1,131 @@
package typeutil
import (
"bytes"
"go/types"
"sync"
)
var bufferPool = &sync.Pool{
New: func() interface{} {
buf := bytes.NewBuffer(nil)
buf.Grow(64)
return buf
},
}
func FuncName(f *types.Func) string {
buf := bufferPool.Get().(*bytes.Buffer)
buf.Reset()
if f.Type() != nil {
sig := f.Type().(*types.Signature)
if recv := sig.Recv(); recv != nil {
buf.WriteByte('(')
if _, ok := recv.Type().(*types.Interface); ok {
// gcimporter creates abstract methods of
// named interfaces using the interface type
// (not the named type) as the receiver.
// Don't print it in full.
buf.WriteString("interface")
} else {
types.WriteType(buf, recv.Type(), nil)
}
buf.WriteByte(')')
buf.WriteByte('.')
} else if f.Pkg() != nil {
writePackage(buf, f.Pkg())
}
}
buf.WriteString(f.Name())
s := buf.String()
bufferPool.Put(buf)
return s
}
func writePackage(buf *bytes.Buffer, pkg *types.Package) {
if pkg == nil {
return
}
s := pkg.Path()
if s != "" {
buf.WriteString(s)
buf.WriteByte('.')
}
}
// Dereference returns a pointer's element type; otherwise it returns
// T.
func Dereference(T types.Type) types.Type {
if p, ok := T.Underlying().(*types.Pointer); ok {
return p.Elem()
}
return T
}
// DereferenceR returns a pointer's element type; otherwise it returns
// T. If the element type is itself a pointer, DereferenceR will be
// applied recursively.
func DereferenceR(T types.Type) types.Type {
if p, ok := T.Underlying().(*types.Pointer); ok {
return DereferenceR(p.Elem())
}
return T
}
func IsObject(obj types.Object, name string) bool {
var path string
if pkg := obj.Pkg(); pkg != nil {
path = pkg.Path() + "."
}
return path+obj.Name() == name
}
// IsType reports whether T's string representation (with fully
// qualified package paths) equals name.
//
// OPT(dh): IsType is kind of expensive; should we really use it?
func IsType(T types.Type, name string) bool { return types.TypeString(T, nil) == name }
func IsPointerLike(T types.Type) bool {
switch T := T.Underlying().(type) {
case *types.Interface, *types.Chan, *types.Map, *types.Signature, *types.Pointer, *types.Slice:
return true
case *types.Basic:
return T.Kind() == types.UnsafePointer
}
return false
}
// Field describes a single (possibly promoted) struct field.
type Field struct {
	// Var is the field's object.
	Var *types.Var
	// Tag is the field's raw struct tag.
	Tag string
	// Path is the sequence of field indices from the root struct to
	// this field, one index per level of embedding.
	Path []int
}
// FlattenFields recursively flattens T and embedded structs,
// returning a list of fields. If multiple fields with the same name
// exist, all will be returned.
func FlattenFields(T *types.Struct) []Field {
	return flattenFields(T, nil, nil)
}

// flattenFields implements FlattenFields. path is the index path from
// the root struct down to T; seen guards against cycles through
// recursive struct types.
func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field {
	if seen == nil {
		seen = map[types.Type]bool{}
	}
	if seen[T] {
		return nil
	}
	seen[T] = true
	var out []Field
	for i := 0; i < T.NumFields(); i++ {
		field := T.Field(i)
		tag := T.Tag(i)
		// The full slice expression caps capacity at len(path), which
		// forces append to copy — sibling fields must not share (and
		// overwrite) one backing array.
		np := append(path[:len(path):len(path)], i)
		if field.Anonymous() {
			if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok {
				// Embedded struct (possibly behind a pointer): promote
				// its fields in place of the anonymous field itself.
				out = append(out, flattenFields(s, np, seen)...)
			}
		} else {
			out = append(out, Field{field, tag, np})
		}
	}
	return out
}

View File

@@ -0,0 +1,107 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package buildir defines an Analyzer that constructs the IR
// of an error-free package and returns the set of all
// functions within it. It does not report any diagnostics itself but
// may be used as an input to other analyzers.
//
// THIS INTERFACE IS EXPERIMENTAL AND MAY BE SUBJECT TO INCOMPATIBLE CHANGE.
package buildir
import (
"go/ast"
"go/types"
"reflect"
"honnef.co/go/tools/go/ir"
"golang.org/x/tools/go/analysis"
)
// noReturn is an analysis fact recording that a function never returns
// normally; Kind carries the precise ir.NoReturn classification.
type noReturn struct {
	Kind ir.NoReturn
}

// AFact marks noReturn as an analysis fact type.
func (*noReturn) AFact() {}

// Analyzer builds IR for the package under analysis and exposes it to
// later passes via the *IR result. It exports/imports noReturn facts
// across package boundaries.
var Analyzer = &analysis.Analyzer{
	Name:       "buildir",
	Doc:        "build IR for later passes",
	Run:        run,
	ResultType: reflect.TypeOf(new(IR)),
	FactTypes:  []analysis.Fact{new(noReturn)},
}

// IR provides intermediate representation for all the
// non-blank source functions in the current package.
type IR struct {
	Pkg      *ir.Package
	SrcFuncs []*ir.Function
}
// run builds IR for pass's package and collects all of its source
// functions, including anonymous ones, in source order.
func run(pass *analysis.Pass) (interface{}, error) {
	// Plundered from ssautil.BuildPackage.
	// We must create a new Program for each Package because the
	// analysis API provides no place to hang a Program shared by
	// all Packages. Consequently, IR Packages and Functions do not
	// have a canonical representation across an analysis session of
	// multiple packages. This is unlikely to be a problem in
	// practice because the analysis API essentially forces all
	// packages to be analysed independently, so any given call to
	// Analysis.Run on a package will see only IR objects belonging
	// to a single Program.
	mode := ir.GlobalDebug
	prog := ir.NewProgram(pass.Fset, mode)
	// Create IR packages for all imports.
	// Order is not significant.
	created := make(map[*types.Package]bool)
	var createAll func(pkgs []*types.Package)
	createAll = func(pkgs []*types.Package) {
		for _, p := range pkgs {
			if !created[p] {
				created[p] = true
				irpkg := prog.CreatePackage(p, nil, nil, true)
				// Attach previously exported noReturn facts to the
				// imported (exported-name) functions.
				for _, fn := range irpkg.Functions {
					if ast.IsExported(fn.Name()) {
						var noRet noReturn
						if pass.ImportObjectFact(fn.Object(), &noRet) {
							fn.NoReturn = noRet.Kind
						}
					}
				}
				createAll(p.Imports())
			}
		}
	}
	createAll(pass.Pkg.Imports())
	// Create and build the primary package.
	irpkg := prog.CreatePackage(pass.Pkg, pass.Files, pass.TypesInfo, false)
	irpkg.Build()
	// Compute list of source functions, including literals,
	// in source order.
	var addAnons func(f *ir.Function)
	funcs := make([]*ir.Function, len(irpkg.Functions))
	copy(funcs, irpkg.Functions)
	addAnons = func(f *ir.Function) {
		for _, anon := range f.AnonFuncs {
			funcs = append(funcs, anon)
			addAnons(anon)
		}
	}
	for _, fn := range irpkg.Functions {
		addAnons(fn)
		// Export noReturn facts for this package's functions so that
		// dependent packages can import them above.
		if fn.NoReturn > 0 {
			pass.ExportObjectFact(fn.Object(), &noReturn{fn.NoReturn})
		}
	}
	return &IR{Pkg: irpkg, SrcFuncs: funcs}, nil
}

View File

@@ -0,0 +1,2 @@
This package is a copy of cmd/go/internal/renameio.
The upstream package no longer exists, as the Go project replaced all of its uses with the lockedfile package.

View File

@@ -0,0 +1,93 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package renameio writes files atomically by renaming temporary files.
package renameio
import (
"bytes"
"io"
"math/rand"
"os"
"path/filepath"
"strconv"
"honnef.co/go/tools/internal/robustio"
)
const patternSuffix = ".tmp"
// Pattern returns a glob pattern that matches the unrenamed temporary files
// created when writing to filename.
func Pattern(filename string) string {
return filepath.Join(filepath.Dir(filename), filepath.Base(filename)+patternSuffix)
}
// WriteFile is like ioutil.WriteFile, but first writes data to an arbitrary
// file in the same directory as filename, then renames it atomically to the
// final name.
//
// That ensures that the final location, if it exists, is always a complete file.
func WriteFile(filename string, data []byte, perm os.FileMode) (err error) {
	r := bytes.NewReader(data)
	return WriteToFile(filename, r, perm)
}
// WriteToFile is a variant of WriteFile that accepts the data as an io.Reader
// instead of a slice.
func WriteToFile(filename string, data io.Reader, perm os.FileMode) (err error) {
	f, err := tempFile(filepath.Dir(filename), filepath.Base(filename), perm)
	if err != nil {
		return err
	}
	// The named result err makes the final outcome visible to this
	// deferred cleanup.
	defer func() {
		// Only call os.Remove on f.Name() if we failed to rename it: otherwise,
		// some other process may have created a new file with the same name after
		// that.
		if err != nil {
			f.Close()
			os.Remove(f.Name())
		}
	}()
	if _, err := io.Copy(f, data); err != nil {
		return err
	}
	// Sync the file before renaming it: otherwise, after a crash the reader may
	// observe a 0-length file instead of the actual contents.
	// See https://golang.org/issue/22397#issuecomment-380831736.
	if err := f.Sync(); err != nil {
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	return robustio.Rename(f.Name(), filename)
}
// ReadFile is like ioutil.ReadFile, but on Windows retries spurious errors that
// may occur if the file is concurrently replaced.
//
// Errors are classified heuristically and retries are bounded, so even this
// function may occasionally return a spurious error on Windows.
// If so, the error will likely wrap one of:
// - syscall.ERROR_ACCESS_DENIED
// - syscall.ERROR_FILE_NOT_FOUND
// - internal/syscall/windows.ERROR_SHARING_VIOLATION
func ReadFile(filename string) ([]byte, error) {
	// Thin delegate; the retry behavior lives in robustio.
	return robustio.ReadFile(filename)
}
// tempFile creates a new temporary file with given permission bits.
func tempFile(dir, prefix string, perm os.FileMode) (f *os.File, err error) {
	// Try random suffixes until a name is free; the attempt count is
	// bounded so a pathological directory can't make us spin forever.
	for attempt := 0; attempt < 10000; attempt++ {
		name := filepath.Join(dir, prefix+strconv.Itoa(rand.Intn(1000000000))+patternSuffix)
		f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm)
		if !os.IsExist(err) {
			break
		}
	}
	return
}

View File

@@ -0,0 +1,6 @@
This package is a copy of cmd/go/internal/robustio.
It is mostly in sync with upstream according to the last commit we've looked at,
with the exception of still using I/O functions that work with older Go versions.
The last upstream commit we've looked at was:
06ac303f6a14b133254f757e54599c48e3c2a4ad

View File

@@ -0,0 +1,53 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package robustio wraps I/O functions that are prone to failure on Windows,
// transparently retrying errors up to an arbitrary timeout.
//
// Errors are classified heuristically and retries are bounded, so the functions
// in this package do not completely eliminate spurious errors. However, they do
// significantly reduce the rate of failure in practice.
//
// If a function in this package does return a spurious error, the
// error will likely wrap one of:
//   - syscall.ERROR_ACCESS_DENIED
//   - syscall.ERROR_FILE_NOT_FOUND
//   - internal/syscall/windows.ERROR_SHARING_VIOLATION
package robustio
// Rename is like os.Rename, but on Windows retries errors that may occur if the
// file is concurrently read or overwritten.
//
// (See golang.org/issue/31247 and golang.org/issue/32188.)
func Rename(oldpath, newpath string) error {
	// Delegates to the platform-specific implementation selected by
	// build tags.
	return rename(oldpath, newpath)
}

// ReadFile is like ioutil.ReadFile, but on Windows retries errors that may
// occur if the file is concurrently replaced.
//
// (See golang.org/issue/31247 and golang.org/issue/32188.)
func ReadFile(filename string) ([]byte, error) {
	// Delegates to the platform-specific implementation selected by
	// build tags.
	return readFile(filename)
}

// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur
// if an executable file in the directory has recently been executed.
//
// (See golang.org/issue/19491.)
func RemoveAll(path string) error {
	// Delegates to the platform-specific implementation selected by
	// build tags.
	return removeAll(path)
}
// IsEphemeralError reports whether err is one of the errors that the functions
// in this package attempt to mitigate.
//
// Errors considered ephemeral include:
// - syscall.ERROR_ACCESS_DENIED
// - syscall.ERROR_FILE_NOT_FOUND
// - internal/syscall/windows.ERROR_SHARING_VIOLATION
//
// This set may be expanded in the future; programs must not rely on the
// non-ephemerality of any given error.
func IsEphemeralError(err error) bool {
	// Delegates to the platform-specific classifier selected by build
	// tags.
	return isEphemeralError(err)
}

View File

@@ -0,0 +1,21 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package robustio
import (
"errors"
"syscall"
)
const errFileNotFound = syscall.ENOENT
// isEphemeralError returns true if err may be resolved by waiting.
func isEphemeralError(err error) bool {
var errno syscall.Errno
if errors.As(err, &errno) {
return errno == errFileNotFound
}
return false
}

View File

@@ -0,0 +1,93 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build windows || darwin
// +build windows darwin
package robustio
import (
"errors"
"io/ioutil"
"math/rand"
"os"
"syscall"
"time"
)
// arbitraryTimeout bounds the total time spent retrying an ephemeral
// error before giving up.
const arbitraryTimeout = 2000 * time.Millisecond

// retry retries ephemeral errors from f up to an arbitrary timeout
// to work around filesystem flakiness on Windows and Darwin.
func retry(f func() (err error, mayRetry bool)) error {
	var (
		bestErr     error
		lowestErrno syscall.Errno
		start       time.Time
		nextSleep   time.Duration = 1 * time.Millisecond
	)
	for {
		err, mayRetry := f()
		if err == nil || !mayRetry {
			return err
		}
		// Track the most informative failure seen so far: the
		// numerically lowest errno wins, falling back to the first
		// non-errno error.
		var errno syscall.Errno
		if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) {
			bestErr = err
			lowestErrno = errno
		} else if bestErr == nil {
			bestErr = err
		}
		// Start the clock on the first failure; give up once the next
		// sleep would push us past the timeout.
		if start.IsZero() {
			start = time.Now()
		} else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout {
			break
		}
		// Backoff with jitter: each sleep grows by a random factor
		// between 1x and 2x.
		time.Sleep(nextSleep)
		nextSleep += time.Duration(rand.Int63n(int64(nextSleep)))
	}
	return bestErr
}
// rename is like os.Rename, but retries ephemeral errors.
//
// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with
// MOVEFILE_REPLACE_EXISTING.
//
// Windows also provides a different system call, ReplaceFile,
// that provides similar semantics, but perhaps preserves more metadata. (The
// documentation on the differences between the two is very sparse.)
//
// Empirical error rates with MoveFileEx are lower under modest concurrency, so
// for now we're sticking with what the os package already provides.
func rename(oldpath, newpath string) (err error) {
return retry(func() (err error, mayRetry bool) {
err = os.Rename(oldpath, newpath)
return err, isEphemeralError(err)
})
}
// readFile is like ioutil.ReadFile, but retries ephemeral errors.
func readFile(filename string) ([]byte, error) {
var b []byte
err := retry(func() (err error, mayRetry bool) {
b, err = ioutil.ReadFile(filename)
// Unlike in rename, we do not retry errFileNotFound here: it can occur
// as a spurious error, but the file may also genuinely not exist, so the
// increase in robustness is probably not worth the extra latency.
return err, isEphemeralError(err) && !errors.Is(err, errFileNotFound)
})
return b, err
}
func removeAll(path string) error {
return retry(func() (err error, mayRetry bool) {
err = os.RemoveAll(path)
return err, isEphemeralError(err)
})
}

View File

@@ -0,0 +1,29 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !windows && !darwin
// +build !windows,!darwin
package robustio
import (
"io/ioutil"
"os"
)
// rename implements Rename on platforms where plain os.Rename needs no
// retry logic.
func rename(oldpath, newpath string) error {
	return os.Rename(oldpath, newpath)
}

// readFile implements ReadFile. ioutil.ReadFile is used instead of
// os.ReadFile deliberately, to keep supporting older Go versions (see
// this package's README).
func readFile(filename string) ([]byte, error) {
	return ioutil.ReadFile(filename)
}

// removeAll implements RemoveAll without retries.
func removeAll(path string) error {
	return os.RemoveAll(path)
}

// isEphemeralError returns false: no errors are considered retryable
// on these platforms.
func isEphemeralError(err error) bool {
	return false
}

View File

@@ -0,0 +1,27 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package robustio
import (
"errors"
"syscall"
)
// ERROR_SHARING_VIOLATION is the Windows "file in use by another
// process" error code, defined locally (value 32) — presumably because
// it is only available as internal/syscall/windows.ERROR_SHARING_VIOLATION
// upstream; confirm against the Windows API docs.
const ERROR_SHARING_VIOLATION = 32

// errFileNotFound is this platform's "file does not exist" error code.
const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND

// isEphemeralError returns true if err may be resolved by waiting.
func isEphemeralError(err error) bool {
	var errno syscall.Errno
	if errors.As(err, &errno) {
		switch errno {
		case syscall.ERROR_ACCESS_DENIED,
			syscall.ERROR_FILE_NOT_FOUND,
			ERROR_SHARING_VIOLATION:
			return true
		}
	}
	return false
}

View File

@@ -0,0 +1,208 @@
package sharedcheck
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"honnef.co/go/tools/analysis/code"
"honnef.co/go/tools/analysis/edit"
"honnef.co/go/tools/analysis/facts"
"honnef.co/go/tools/analysis/report"
"honnef.co/go/tools/go/ast/astutil"
"honnef.co/go/tools/go/ir"
"honnef.co/go/tools/go/ir/irutil"
"honnef.co/go/tools/go/types/typeutil"
"honnef.co/go/tools/internal/passes/buildir"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
)
// CheckRangeStringRunes flags loops of the form `for range []rune(s)`
// whose key is blank: ranging over the string directly already
// iterates over its runes.
func CheckRangeStringRunes(pass *analysis.Pass) (interface{}, error) {
	for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
		cb := func(node ast.Node) bool {
			rng, ok := node.(*ast.RangeStmt)
			if !ok || !astutil.IsBlank(rng.Key) {
				return true
			}
			v, _ := fn.ValueForExpr(rng.X)
			// Check that we're converting from string to []rune
			val, _ := v.(*ir.Convert)
			if val == nil {
				return true
			}
			Tsrc, ok := typeutil.CoreType(val.X.Type()).(*types.Basic)
			if !ok || Tsrc.Kind() != types.String {
				return true
			}
			Tdst, ok := typeutil.CoreType(val.Type()).(*types.Slice)
			if !ok {
				return true
			}
			TdstElem, ok := Tdst.Elem().(*types.Basic)
			if !ok || TdstElem.Kind() != types.Int32 {
				return true
			}
			// Check that the result of the conversion is only used to
			// range over
			refs := val.Referrers()
			if refs == nil {
				return true
			}
			// Expect two refs: one for obtaining the length of the slice,
			// one for accessing the elements
			if len(irutil.FilterDebug(*refs)) != 2 {
				// TODO(dh): right now, we check that only one place
				// refers to our slice. This will miss cases such as
				// ranging over the slice twice. Ideally, we'd ensure that
				// the slice is only used for ranging over (without
				// accessing the key), but that is harder to do because in
				// IR form, ranging over a slice looks like an ordinary
				// loop with index increments and slice accesses. We'd
				// have to look at the associated AST node to check that
				// it's a range statement.
				return true
			}
			pass.Reportf(rng.Pos(), "should range over string, not []rune(string)")
			return true
		}
		// Only functions with source syntax can be inspected.
		if source := fn.Source(); source != nil {
			ast.Inspect(source, cb)
		}
	}
	return nil, nil
}
// RedundantTypeInDeclarationChecker returns a checker that flags variable declarations with redundantly specified types.
// That is, it flags 'var v T = e' where e's type is identical to T and 'var v = e' (or 'v := e') would have the same effect.
//
// It does not flag variables under the following conditions, to reduce the number of false positives:
//   - global variables — these often specify types to aid godoc
//   - files that use cgo — cgo code generation and pointer checking emits redundant types
//
// It does not flag variables under the following conditions, unless flagHelpfulTypes is true, to reduce the number of noisy positives:
//   - packages that import syscall or unsafe — these sometimes use this form of assignment to make sure types are as expected
//   - variables named the blank identifier — a pattern used to confirm the types of variables
//   - untyped expressions on the rhs — the explicitness might aid readability
func RedundantTypeInDeclarationChecker(verb string, flagHelpfulTypes bool) *analysis.Analyzer {
	fn := func(pass *analysis.Pass) (interface{}, error) {
		// eval type-checks expr in isolation, so we can observe the type the
		// expression would have without the declaration's explicit type.
		eval := func(expr ast.Expr) (types.TypeAndValue, error) {
			info := &types.Info{
				Types: map[ast.Expr]types.TypeAndValue{},
			}
			err := types.CheckExpr(pass.Fset, pass.Pkg, expr.Pos(), expr, info)
			return info.Types[expr], err
		}

		if !flagHelpfulTypes {
			// Don't look at code in low-level packages
			for _, imp := range pass.Pkg.Imports() {
				if imp.Path() == "syscall" || imp.Path() == "unsafe" {
					return nil, nil
				}
			}
		}

		fn := func(node ast.Node) {
			decl := node.(*ast.GenDecl)
			if decl.Tok != token.VAR {
				return
			}
			gen, _ := code.Generator(pass, decl.Pos())
			if gen == facts.Cgo {
				// TODO(dh): remove this exception once we can use UsesCgo
				return
			}
			// Delay looking up parent AST nodes until we have to
			checkedDecl := false
		specLoop:
			for _, spec := range decl.Specs {
				spec := spec.(*ast.ValueSpec)
				if spec.Type == nil {
					continue
				}
				if len(spec.Names) != len(spec.Values) {
					continue
				}
				Tlhs := pass.TypesInfo.TypeOf(spec.Type)
				for i, v := range spec.Values {
					if !flagHelpfulTypes && spec.Names[i].Name == "_" {
						continue specLoop
					}
					Trhs := pass.TypesInfo.TypeOf(v)
					if !types.Identical(Tlhs, Trhs) {
						continue specLoop
					}
					// Some expressions are untyped and get converted to the lhs type implicitly.
					// This applies to untyped constants, shift operations with an untyped lhs, and possibly others.
					//
					// Check if the type is truly redundant, i.e. if the type on the lhs doesn't match the default type of the untyped constant.
					tv, err := eval(v)
					if err != nil {
						panic(err)
					}
					if b, ok := tv.Type.(*types.Basic); ok && (b.Info()&types.IsUntyped) != 0 {
						if Tlhs != types.Default(b) {
							// The rhs is untyped and its default type differs from the explicit type on the lhs
							continue specLoop
						}
						switch v := v.(type) {
						case *ast.Ident:
							// Only flag named constant rhs if it's a predeclared identifier.
							// Don't flag other named constants, as the explicit type may aid readability.
							if pass.TypesInfo.ObjectOf(v).Pkg() != nil && !flagHelpfulTypes {
								continue specLoop
							}
						case *ast.BasicLit:
							// Do flag basic literals
						default:
							// Don't flag untyped rhs expressions unless flagHelpfulTypes is set
							if !flagHelpfulTypes {
								continue specLoop
							}
						}
					}
				}
				if !checkedDecl {
					// Don't flag global variables. These often have explicit types for godoc's sake.
					path, _ := astutil.PathEnclosingInterval(code.File(pass, decl), decl.Pos(), decl.Pos())
				pathLoop:
					for _, el := range path {
						switch el.(type) {
						case *ast.FuncDecl, *ast.FuncLit:
							checkedDecl = true
							break pathLoop
						}
					}
					if !checkedDecl {
						// decl is not inside a function
						break specLoop
					}
				}
				report.Report(pass, spec.Type, fmt.Sprintf("%s omit type %s from declaration; it will be inferred from the right-hand side", verb, report.Render(pass, spec.Type)), report.FilterGenerated(),
					report.Fixes(edit.Fix("Remove redundant type", edit.Delete(spec.Type))))
			}
		}
		code.Preorder(pass, fn, (*ast.GenDecl)(nil))
		return nil, nil
	}
	return &analysis.Analyzer{
		Run: fn,
		// Note: facts.Generated was previously listed twice; a single entry suffices.
		Requires: []*analysis.Analyzer{facts.Generated, inspect.Analyzer, facts.TokenFile},
	}
}

View File

@@ -0,0 +1,36 @@
package sync
// Semaphore is a counting semaphore backed by a buffered channel.
// A slot is held while a value sits in the channel.
type Semaphore struct {
	ch chan struct{}
}

// NewSemaphore returns a semaphore permitting up to size concurrent holders.
func NewSemaphore(size int) Semaphore {
	return Semaphore{ch: make(chan struct{}, size)}
}

// Acquire blocks until a slot becomes available and claims it.
func (s Semaphore) Acquire() {
	s.ch <- struct{}{}
}

// AcquireMaybe claims a slot only if one is immediately available,
// reporting whether it succeeded. It never blocks.
func (s Semaphore) AcquireMaybe() bool {
	select {
	case s.ch <- struct{}{}:
		return true
	default:
		return false
	}
}

// Release returns a previously acquired slot.
func (s Semaphore) Release() {
	<-s.ch
}

// Len reports how many slots are currently held.
func (s Semaphore) Len() int {
	return len(s.ch)
}

// Cap reports the total number of slots.
func (s Semaphore) Cap() int {
	return cap(s.ch)
}

View File

@@ -0,0 +1,69 @@
package knowledge
// Args maps a qualified "function.parameter" descriptor (for example
// "strconv.ParseInt.base") to the zero-based index of that parameter.
// Method receivers do not count as arguments. Look entries up via Arg.
var Args = map[string]int{
	"(*encoding/json.Decoder).Decode.v":    0,
	"(*encoding/json.Encoder).Encode.v":    0,
	"(*encoding/xml.Decoder).Decode.v":     0,
	"(*encoding/xml.Encoder).Encode.v":     0,
	"(*sync.Pool).Put.x":                   0,
	"(*text/template.Template).Parse.text": 0,
	"(io.Seeker).Seek.offset":              0,
	"(time.Time).Sub.u":                    0,
	"append.elems":                         1,
	"append.slice":                         0,
	"bytes.Equal.a":                        0,
	"bytes.Equal.b":                        1,
	"encoding/binary.Write.data":           2,
	"errors.New.text":                      0,
	"fmt.Fprintf.format":                   1,
	"fmt.Printf.format":                    0,
	"fmt.Sprintf.a[0]":                     1,
	"fmt.Sprintf.format":                   0,
	"json.Marshal.v":                       0,
	"json.Unmarshal.v":                     1,
	"len.v":                                0,
	"make.size[0]":                         1,
	"make.size[1]":                         2,
	"make.t":                               0,
	"net/url.Parse.rawurl":                 0,
	"os.OpenFile.flag":                     1,
	"os/exec.Command.name":                 0,
	"os/signal.Notify.c":                   0,
	"regexp.Compile.expr":                  0,
	"runtime.SetFinalizer.finalizer":       1,
	"runtime.SetFinalizer.obj":             0,
	"sort.Sort.data":                       0,
	"strconv.AppendFloat.bitSize":          4,
	"strconv.AppendFloat.fmt":              2,
	"strconv.AppendInt.base":               2,
	"strconv.AppendUint.base":              2,
	"strconv.FormatComplex.bitSize":        3,
	"strconv.FormatComplex.fmt":            1,
	"strconv.FormatFloat.bitSize":          3,
	"strconv.FormatFloat.fmt":              1,
	"strconv.FormatInt.base":               1,
	"strconv.FormatUint.base":              1,
	"strconv.ParseComplex.bitSize":         1,
	"strconv.ParseFloat.bitSize":           1,
	"strconv.ParseInt.base":                1,
	"strconv.ParseInt.bitSize":             2,
	"strconv.ParseUint.base":               1,
	"strconv.ParseUint.bitSize":            2,
	"time.Parse.layout":                    0,
	"time.Sleep.d":                         0,
	"xml.Marshal.v":                        0,
	"xml.Unmarshal.v":                      1,
}
// Arg turns the name of an argument into an argument index.
// Indices are zero-based and method receivers do not count as arguments.
//
// Arg refers to a manually compiled mapping (see the Args variable.)
// Modify the knowledge package to add new arguments.
func Arg(name string) int {
	if idx, ok := Args[name]; ok {
		return idx
	}
	panic("unknown argument " + name)
}

View File

@@ -0,0 +1,253 @@
package knowledge
const (
	// DeprecatedNeverUse indicates that an API should never be used, regardless of Go version.
	DeprecatedNeverUse = -1
	// DeprecatedUseNoLonger indicates that an API has no use anymore.
	DeprecatedUseNoLonger = -2
)

// Deprecation describes when a Go API has been deprecated.
type Deprecation struct {
	// The minor Go version since which this API has been deprecated.
	DeprecatedSince int
	// The minor Go version since which an alternative API has been available.
	// May also be one of DeprecatedNeverUse or DeprecatedUseNoLonger.
	AlternativeAvailableSince int
}

// go/importer.ForCompiler contains "Deprecated:", but it refers to a single argument, not the whole function.
// Luckily, the notice starts in the middle of a paragraph, and as such isn't detected by us.

// StdlibDeprecations contains a mapping of Go API (such as variables, methods, or fields, among others)
// to information about when it has been deprecated.
// Each value is {DeprecatedSince, AlternativeAvailableSince}; see Deprecation.
var StdlibDeprecations = map[string]Deprecation{
	// FIXME(dh): AllowBinary isn't being detected as deprecated
	// because the comment has a newline right after "Deprecated:"
	"go/build.AllowBinary": {7, 7},

	"(archive/zip.FileHeader).CompressedSize":   {1, 1},
	"(archive/zip.FileHeader).UncompressedSize": {1, 1},
	"(archive/zip.FileHeader).ModifiedTime":     {10, 10},
	"(archive/zip.FileHeader).ModifiedDate":     {10, 10},
	"(*archive/zip.FileHeader).ModTime":         {10, 10},
	"(*archive/zip.FileHeader).SetModTime":      {10, 10},
	"(go/doc.Package).Bugs":                     {1, 1},
	"os.SEEK_SET":                               {7, 7},
	"os.SEEK_CUR":                               {7, 7},
	"os.SEEK_END":                               {7, 7},
	"(net.Dialer).Cancel":                       {7, 7},
	"runtime.CPUProfile":                        {9, 0},
	"compress/flate.ReadError":                  {6, DeprecatedUseNoLonger},
	"compress/flate.WriteError":                 {6, DeprecatedUseNoLonger},
	"path/filepath.HasPrefix":                   {0, DeprecatedNeverUse},
	"(net/http.Transport).Dial":                 {7, 7},
	"(net/http.Transport).DialTLS":              {14, 14},
	"(*net/http.Transport).CancelRequest":       {6, 5},
	"net/http.ErrWriteAfterFlush":               {7, DeprecatedUseNoLonger},
	"net/http.ErrHeaderTooLong":                 {8, DeprecatedUseNoLonger},
	"net/http.ErrShortBody":                     {8, DeprecatedUseNoLonger},
	"net/http.ErrMissingContentLength":          {8, DeprecatedUseNoLonger},
	"net/http/httputil.ErrPersistEOF":           {0, DeprecatedUseNoLonger},
	"net/http/httputil.ErrClosed":               {0, DeprecatedUseNoLonger},
	"net/http/httputil.ErrPipeline":             {0, DeprecatedUseNoLonger},
	"net/http/httputil.ServerConn":              {0, 0},
	"net/http/httputil.NewServerConn":           {0, 0},
	"net/http/httputil.ClientConn":              {0, 0},
	"net/http/httputil.NewClientConn":           {0, 0},
	"net/http/httputil.NewProxyClientConn":      {0, 0},
	"(net/http.Request).Cancel":                 {7, 7},
	"(text/template/parse.PipeNode).Line":       {1, DeprecatedUseNoLonger},
	"(text/template/parse.ActionNode).Line":     {1, DeprecatedUseNoLonger},
	"(text/template/parse.BranchNode).Line":     {1, DeprecatedUseNoLonger},
	"(text/template/parse.TemplateNode).Line":   {1, DeprecatedUseNoLonger},
	"database/sql/driver.ColumnConverter":       {9, 9},
	"database/sql/driver.Execer":                {8, 8},
	"database/sql/driver.Queryer":               {8, 8},
	"(database/sql/driver.Conn).Begin":          {8, 8},
	"(database/sql/driver.Stmt).Exec":           {8, 8},
	"(database/sql/driver.Stmt).Query":          {8, 8},
	"syscall.StringByteSlice":                   {1, 1},
	"syscall.StringBytePtr":                     {1, 1},
	"syscall.StringSlicePtr":                    {1, 1},
	"syscall.StringToUTF16":                     {1, 1},
	"syscall.StringToUTF16Ptr":                  {1, 1},
	"(*regexp.Regexp).Copy":                     {12, DeprecatedUseNoLonger},
	"(archive/tar.Header).Xattrs":               {10, 10},
	"archive/tar.TypeRegA":                      {11, 1},
	"go/types.NewInterface":                     {11, 11},
	"(*go/types.Interface).Embedded":            {11, 11},
	"go/importer.For":                           {12, 12},
	"encoding/json.InvalidUTF8Error":            {2, DeprecatedUseNoLonger},
	"encoding/json.UnmarshalFieldError":         {2, DeprecatedUseNoLonger},
	"encoding/csv.ErrTrailingComma":             {2, DeprecatedUseNoLonger},
	"(encoding/csv.Reader).TrailingComma":       {2, DeprecatedUseNoLonger},
	"(net.Dialer).DualStack":                    {12, 12},
	"net/http.ErrUnexpectedTrailer":             {12, DeprecatedUseNoLonger},
	"net/http.CloseNotifier":                    {11, 7},
	// This is hairy. The notice says "Not all errors in the http package related to protocol errors are of type ProtocolError", but doesn't that imply that some errors do?
	"net/http.ProtocolError":                      {8, DeprecatedUseNoLonger},
	"(crypto/x509.CertificateRequest).Attributes": {5, 3},
	// These functions have no direct alternative, but they are insecure and should no longer be used.
	"crypto/x509.IsEncryptedPEMBlock": {16, DeprecatedNeverUse},
	"crypto/x509.DecryptPEMBlock":     {16, DeprecatedNeverUse},
	"crypto/x509.EncryptPEMBlock":     {16, DeprecatedNeverUse},
	"crypto/dsa":                      {16, DeprecatedNeverUse},
	// This function has no alternative, but also no purpose.
	"(*crypto/rc4.Cipher).Reset":                     {12, DeprecatedNeverUse},
	"(net/http/httptest.ResponseRecorder).HeaderMap": {11, 7},
	"image.ZP":                                       {13, 0},
	"image.ZR":                                       {13, 0},
	"(*debug/gosym.LineTable).LineToPC":              {2, 2},
	"(*debug/gosym.LineTable).PCToLine":              {2, 2},
	"crypto/tls.VersionSSL30":                        {13, DeprecatedNeverUse},
	"(crypto/tls.Config).NameToCertificate":          {14, DeprecatedUseNoLonger},
	"(*crypto/tls.Config).BuildNameToCertificate":    {14, DeprecatedUseNoLonger},
	"(crypto/tls.Config).SessionTicketKey":           {16, 5},
	// No alternative, no use
	"(crypto/tls.ConnectionState).NegotiatedProtocolIsMutual": {16, DeprecatedNeverUse},
	// No alternative, but insecure
	"(crypto/tls.ConnectionState).TLSUnique": {16, DeprecatedNeverUse},
	"image/jpeg.Reader":                      {4, DeprecatedNeverUse},

	// All of these have been deprecated in favour of external libraries
	"syscall.AttachLsf":                     {7, 0},
	"syscall.DetachLsf":                     {7, 0},
	"syscall.LsfSocket":                     {7, 0},
	"syscall.SetLsfPromisc":                 {7, 0},
	"syscall.LsfJump":                       {7, 0},
	"syscall.LsfStmt":                       {7, 0},
	"syscall.BpfStmt":                       {7, 0},
	"syscall.BpfJump":                       {7, 0},
	"syscall.BpfBuflen":                     {7, 0},
	"syscall.SetBpfBuflen":                  {7, 0},
	"syscall.BpfDatalink":                   {7, 0},
	"syscall.SetBpfDatalink":                {7, 0},
	"syscall.SetBpfPromisc":                 {7, 0},
	"syscall.FlushBpf":                      {7, 0},
	"syscall.BpfInterface":                  {7, 0},
	"syscall.SetBpfInterface":               {7, 0},
	"syscall.BpfTimeout":                    {7, 0},
	"syscall.SetBpfTimeout":                 {7, 0},
	"syscall.BpfStats":                      {7, 0},
	"syscall.SetBpfImmediate":               {7, 0},
	"syscall.SetBpf":                        {7, 0},
	"syscall.CheckBpfVersion":               {7, 0},
	"syscall.BpfHeadercmpl":                 {7, 0},
	"syscall.SetBpfHeadercmpl":              {7, 0},
	"syscall.RouteRIB":                      {8, 0},
	"syscall.RoutingMessage":                {8, 0},
	"syscall.RouteMessage":                  {8, 0},
	"syscall.InterfaceMessage":              {8, 0},
	"syscall.InterfaceAddrMessage":          {8, 0},
	"syscall.ParseRoutingMessage":           {8, 0},
	"syscall.ParseRoutingSockaddr":          {8, 0},
	"syscall.InterfaceAnnounceMessage":      {7, 0},
	"syscall.InterfaceMulticastAddrMessage": {7, 0},
	"syscall.FormatMessage":                 {5, 0},
	"syscall.PostQueuedCompletionStatus":    {17, 0},
	"syscall.GetQueuedCompletionStatus":     {17, 0},
	"syscall.CreateIoCompletionPort":        {17, 0},

	// Not marked as deprecated with a recognizable header, but deprecated nonetheless.
	"io/ioutil": {16, 16},

	"bytes.Title":   {18, 0},
	"strings.Title": {18, 0},
	"(crypto/tls.Config).PreferServerCipherSuites": {18, DeprecatedUseNoLonger},
	// It's not clear if Subjects was okay to use in the past, so we err on the less noisy side of assuming that it was.
	"(*crypto/x509.CertPool).Subjects": {18, DeprecatedUseNoLonger},
	"go/types.NewSignature":            {18, 18},
	"(net.Error).Temporary":            {18, DeprecatedNeverUse},
	// InterfaceData is another tricky case. It was deprecated in Go 1.18, but has been useless since Go 1.4, and an
	// "alternative" (using your own unsafe hacks) has existed forever. We don't want to get into hairsplitting with
	// users who somehow successfully used this between 1.4 and 1.18, so we'll just tag it as deprecated since 1.18.
	"(reflect.Value).InterfaceData": {18, 18},

	// The following objects are only deprecated on Windows.
	"syscall.Syscall":   {18, 18},
	"syscall.Syscall12": {18, 18},
	"syscall.Syscall15": {18, 18},
	"syscall.Syscall18": {18, 18},
	"syscall.Syscall6":  {18, 18},
	"syscall.Syscall9":  {18, 18},
}
// Last imported from Go at 4aa1efed4853ea067d665a952eee77c52faac774 with the following numbers of deprecations:
//
// archive/tar/common.go:2
// archive/zip/struct.go:6
// bytes/bytes.go:1
// cmd/compile/internal/ir/expr.go:1
// cmd/compile/internal/ir/type.go:1
// cmd/compile/internal/syntax/walk.go:1
// cmd/compile/internal/types/sym.go:2
// cmd/go/internal/modcmd/edit.go:1
// cmd/go/testdata/mod/example.com_deprecated_a_v1.9.0.txt:2
// cmd/go/testdata/mod/example.com_deprecated_b_v1.9.0.txt:2
// cmd/go/testdata/mod/example.com_undeprecated_v1.0.0.txt:2
// cmd/go/testdata/script/mod_deprecate_message.txt:4
// cmd/go/testdata/script/mod_edit.txt:1
// cmd/go/testdata/script/mod_list_deprecated.txt:2
// cmd/go/testdata/script/mod_list_deprecated_replace.txt:1
// cmd/internal/obj/link.go:5
// cmd/internal/obj/textflag.go:1
// cmd/vendor/golang.org/x/mod/modfile/rule.go:2
// cmd/vendor/golang.org/x/mod/semver/semver.go:1
// cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go:1
// cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go:1
// cmd/vendor/golang.org/x/sys/windows/security_windows.go:1
// cmd/vendor/golang.org/x/sys/windows/syscall_windows.go:2
// compress/flate/inflate.go:2
// crypto/dsa/dsa.go:1
// crypto/rc4/rc4.go:1
// crypto/tls/common.go:7
// crypto/x509/cert_pool.go:1
// crypto/x509/pem_decrypt.go:3
// crypto/x509/x509.go:1
// database/sql/driver/driver.go:6
// debug/gosym/pclntab.go:2
// encoding/csv/reader.go:2
// encoding/json/decode.go:1
// encoding/json/encode.go:1
// go/doc/doc.go:1
// go/importer/importer.go:2
// go/types/errorcodes.go:1
// go/types/interface.go:2
// go/types/signature.go:1
// image/geom.go:2
// image/jpeg/reader.go:1
// net/dial.go:2
// net/http/httptest/recorder.go:1
// net/http/httputil/persist.go:8
// net/http/request.go:6
// net/http/server.go:2
// net/http/socks_bundle.go:1
// net/http/transport.go:3
// net/net.go:1
// os/file.go:1
// path/filepath/path_plan9.go:1
// path/filepath/path_unix.go:1
// path/filepath/path_windows.go:1
// reflect/value.go:1
// regexp/regexp.go:1
// runtime/cpuprof.go:1
// strings/strings.go:1
// syscall/bpf_bsd.go:18
// syscall/bpf_darwin.go:18
// syscall/dll_windows.go:6
// syscall/exec_plan9.go:1
// syscall/exec_unix.go:1
// syscall/lsf_linux.go:6
// syscall/route_bsd.go:7
// syscall/route_darwin.go:1
// syscall/route_dragonfly.go:2
// syscall/route_freebsd.go:2
// syscall/route_netbsd.go:1
// syscall/route_openbsd.go:1
// syscall/syscall.go:3
// syscall/syscall_windows.go:6
// text/template/parse/node.go:5
// vendor/golang.org/x/crypto/curve25519/curve25519.go:1
// vendor/golang.org/x/text/transform/transform.go:1

View File

@@ -0,0 +1,2 @@
// Package knowledge contains manually collected information about Go APIs.
package knowledge

View File

@@ -0,0 +1,10 @@
This package is a copy of cmd/go/internal/cache.
Differences from upstream:
- we continue to use renameio instead of lockedfile for writing trim.txt
- we still use I/O helpers that work with earlier versions of Go.
- we use a cache directory specific to Staticcheck
- we use a Staticcheck-specific salt
The last upstream commit we've looked at was:
06ac303f6a14b133254f757e54599c48e3c2a4ad

View File

@@ -0,0 +1,533 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cache implements a build artifact cache.
//
// This package is a slightly modified fork of Go's
// cmd/go/internal/cache package.
package cache
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"honnef.co/go/tools/internal/renameio"
)
// An ActionID is a cache action key, the hash of a complete description of a
// repeatable computation (command line, environment variables,
// input file contents, executable contents).
type ActionID [HashSize]byte

// An OutputID is a cache output key, the hash of an output of a computation.
type OutputID [HashSize]byte

// A Cache is a package cache, backed by a file system directory tree.
type Cache struct {
	dir  string           // root of the cache tree; Open creates 256 two-hex-digit subdirectories under it
	now  func() time.Time // clock; set to time.Now by Open
	salt []byte           // extra salt data recorded via SetSalt
}
// Open opens and returns the cache in the given directory.
//
// It is safe for multiple processes on a single machine to use the
// same cache directory in a local file system simultaneously.
// They will coordinate using operating system file locks and may
// duplicate effort but will not corrupt the cache.
//
// However, it is NOT safe for multiple processes on different machines
// to share a cache directory (for example, if the directory were stored
// in a network file system). File locking is notoriously unreliable in
// network file systems and may not suffice to protect the cache.
func Open(dir string) (*Cache, error) {
	info, err := os.Stat(dir)
	if err != nil {
		return nil, err
	}
	if !info.IsDir() {
		// errors.New instead of fmt.Errorf: the message has no format verbs.
		return nil, &os.PathError{Op: "open", Path: dir, Err: errors.New("not a directory")}
	}
	// Create all 256 fan-out subdirectories (00..ff) up front so that
	// later writes never need to create directories.
	for i := 0; i < 256; i++ {
		name := filepath.Join(dir, fmt.Sprintf("%02x", i))
		if err := os.MkdirAll(name, 0777); err != nil {
			return nil, err
		}
	}
	c := &Cache{
		dir: dir,
		now: time.Now,
	}
	return c, nil
}
// SetSalt records b as the cache's salt. Per the package README, a
// Staticcheck-specific salt is used; how the salt is consumed is not
// visible in this file (presumably it is mixed into action hashes —
// NOTE(review): confirm against the hashing code).
func (c *Cache) SetSalt(b []byte) {
	c.salt = b
}

// fileName returns the name of the file corresponding to the given id.
// Entries fan out over 256 subdirectories keyed by the first hash byte:
// <dir>/<id[0] in hex>/<full id in hex>-<key>.
func (c *Cache) fileName(id [HashSize]byte, key string) string {
	return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key)
}
// An entryNotFoundError indicates that a cache entry was not found, with an
// optional underlying reason.
type entryNotFoundError struct {
	Err error
}

// Error returns "cache entry not found", annotated with the underlying
// reason when one is present.
func (e *entryNotFoundError) Error() string {
	if e.Err != nil {
		return fmt.Sprintf("cache entry not found: %v", e.Err)
	}
	return "cache entry not found"
}

// Unwrap exposes the underlying reason to errors.Is/errors.As.
func (e *entryNotFoundError) Unwrap() error {
	return e.Err
}
const (
	// action entry file is "v1 <hex id> <hex out> <decimal size space-padded to 20 bytes> <unixnano space-padded to 20 bytes>\n"
	hexSize   = HashSize * 2
	entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1
)

// verify controls whether to run the cache in verify mode.
// In verify mode, the cache always returns errMissing from Get
// but then double-checks in Put that the data being written
// exactly matches any existing entry. This provides an easy
// way to detect program behavior that would have been different
// had the cache entry been returned from Get.
//
// verify is enabled by setting the environment variable
// GODEBUG=gocacheverify=1.
var verify = false

var errVerifyMode = errors.New("gocacheverify=1")

// DebugTest is set when GODEBUG=gocachetest=1 is in the environment.
var DebugTest = false

func init() { initEnv() }

// initEnv resets the debug flags and re-reads them from the GODEBUG
// environment variable. debugHash is declared elsewhere in this package.
func initEnv() {
	verify = false
	debugHash = false
	debug := strings.Split(os.Getenv("GODEBUG"), ",")
	for _, f := range debug {
		if f == "gocacheverify=1" {
			verify = true
		}
		if f == "gocachehash=1" {
			debugHash = true
		}
		if f == "gocachetest=1" {
			DebugTest = true
		}
	}
}
// Get looks up the action ID in the cache,
// returning the corresponding output ID and file size, if any.
// Note that finding an output ID does not guarantee that the
// saved file for that output ID is still available.
func (c *Cache) Get(id ActionID) (Entry, error) {
	if verify {
		// Verify mode deliberately reports every entry as missing; see verify.
		return Entry{}, &entryNotFoundError{Err: errVerifyMode}
	}
	return c.get(id)
}

// An Entry is the result of a cache lookup: the output ID recorded for an
// action, the output's size in bytes, and the time the entry was written.
type Entry struct {
	OutputID OutputID
	Size     int64
	Time     time.Time
}

// get is Get but does not respect verify mode, so that Put can use it.
func (c *Cache) get(id ActionID) (Entry, error) {
	// missing wraps a reason into the canonical not-found error.
	missing := func(reason error) (Entry, error) {
		return Entry{}, &entryNotFoundError{Err: reason}
	}
	f, err := os.Open(c.fileName(id, "a"))
	if err != nil {
		return missing(err)
	}
	defer f.Close()
	// Read exactly one index entry; the extra byte detects oversized files.
	entry := make([]byte, entrySize+1) // +1 to detect whether f is too long
	if n, err := io.ReadFull(f, entry); n > entrySize {
		return missing(errors.New("too long"))
	} else if err != io.ErrUnexpectedEOF {
		if err == io.EOF {
			return missing(errors.New("file is empty"))
		}
		return missing(err)
	} else if n < entrySize {
		return missing(errors.New("entry file incomplete"))
	}
	// Validate the fixed layout: "v1 <id> <out> <size> <time>\n".
	if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' {
		return missing(errors.New("invalid header"))
	}
	// Slice the entry buffer field by field.
	eid, entry := entry[3:3+hexSize], entry[3+hexSize:]
	eout, entry := entry[1:1+hexSize], entry[1+hexSize:]
	esize, entry := entry[1:1+20], entry[1+20:]
	//lint:ignore SA4006 See https://github.com/dominikh/go-tools/issues/465
	etime, entry := entry[1:1+20], entry[1+20:]
	var buf [HashSize]byte
	if _, err := hex.Decode(buf[:], eid); err != nil {
		return missing(fmt.Errorf("decoding ID: %v", err))
	} else if buf != id {
		return missing(errors.New("mismatched ID"))
	}
	if _, err := hex.Decode(buf[:], eout); err != nil {
		return missing(fmt.Errorf("decoding output ID: %v", err))
	}
	// Size and timestamp are space-padded decimals; skip the padding.
	i := 0
	for i < len(esize) && esize[i] == ' ' {
		i++
	}
	size, err := strconv.ParseInt(string(esize[i:]), 10, 64)
	if err != nil {
		return missing(fmt.Errorf("parsing size: %v", err))
	} else if size < 0 {
		return missing(errors.New("negative size"))
	}
	i = 0
	for i < len(etime) && etime[i] == ' ' {
		i++
	}
	tm, err := strconv.ParseInt(string(etime[i:]), 10, 64)
	if err != nil {
		return missing(fmt.Errorf("parsing timestamp: %v", err))
	} else if tm < 0 {
		return missing(errors.New("negative timestamp"))
	}

	// Mark the index file as recently used for trimming purposes.
	c.used(c.fileName(id, "a"))

	return Entry{buf, size, time.Unix(0, tm)}, nil
}
// GetFile looks up the action ID in the cache and returns
// the name of the corresponding data file.
func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) {
	entry, err = c.Get(id)
	if err != nil {
		return "", Entry{}, err
	}
	file = c.OutputFile(entry.OutputID)
	// The index may point at a data file that has since been trimmed or
	// truncated; verify it still exists with the recorded size.
	info, err := os.Stat(file)
	if err != nil {
		return "", Entry{}, &entryNotFoundError{Err: err}
	}
	if info.Size() != entry.Size {
		return "", Entry{}, &entryNotFoundError{Err: errors.New("file incomplete")}
	}
	return file, entry, nil
}

// GetBytes looks up the action ID in the cache and returns
// the corresponding output bytes.
// GetBytes should only be used for data that can be expected to fit in memory.
func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) {
	entry, err := c.Get(id)
	if err != nil {
		return nil, entry, err
	}
	// Read error is deliberately ignored; a short or missing file fails
	// the checksum comparison below.
	data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID))
	if sha256.Sum256(data) != entry.OutputID {
		return nil, entry, &entryNotFoundError{Err: errors.New("bad checksum")}
	}
	return data, entry, nil
}

// OutputFile returns the name of the cache file storing output with the given OutputID.
func (c *Cache) OutputFile(out OutputID) string {
	file := c.fileName(out, "d")
	// Touch the file so trimming sees it as recently used.
	c.used(file)
	return file
}
// Time constants for cache expiration.
//
// We set the mtime on a cache file on each use, but at most one per mtimeInterval (1 hour),
// to avoid causing many unnecessary inode updates. The mtimes therefore
// roughly reflect "time of last use" but may in fact be older by at most an hour.
//
// We scan the cache for entries to delete at most once per trimInterval (1 day).
//
// When we do scan the cache, we delete entries that have not been used for
// at least trimLimit (5 days). Statistics gathered from a month of usage by
// Go developers found that essentially all reuse of cached entries happened
// within 5 days of the previous reuse. See golang.org/issue/22990.
const (
	mtimeInterval = 1 * time.Hour
	trimInterval  = 24 * time.Hour
	trimLimit     = 5 * 24 * time.Hour
)

// used makes a best-effort attempt to update mtime on file,
// so that mtime reflects cache access time.
//
// Because the reflection only needs to be approximate,
// and to reduce the amount of disk activity caused by using
// cache entries, used only updates the mtime if the current
// mtime is more than an hour old. This heuristic eliminates
// nearly all of the mtime updates that would otherwise happen,
// while still keeping the mtimes useful for cache trimming.
func (c *Cache) used(file string) {
	info, err := os.Stat(file)
	if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval {
		return
	}
	// Best effort: a failed Chtimes only makes the entry look older than it is.
	os.Chtimes(file, c.now(), c.now())
}
// Trim removes old cache entries that are likely not to be reused.
func (c *Cache) Trim() {
	now := c.now()

	// We maintain in dir/trim.txt the time of the last completed cache trim.
	// If the cache has been trimmed recently enough, do nothing.
	// This is the common case.
	data, _ := renameio.ReadFile(filepath.Join(c.dir, "trim.txt"))
	t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
	if err == nil && now.Sub(time.Unix(t, 0)) < trimInterval {
		return
	}

	// Trim each of the 256 subdirectories.
	// We subtract an additional mtimeInterval
	// to account for the imprecision of our "last used" mtimes.
	cutoff := now.Add(-trimLimit - mtimeInterval)
	for i := 0; i < 256; i++ {
		subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i))
		c.trimSubdir(subdir, cutoff)
	}

	// Ignore errors from here: if we don't write the complete timestamp, the
	// cache will appear older than it is, and we'll trim it again next time.
	renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666)
}

// trimSubdir trims a single cache subdirectory, removing entry files whose
// mtime is older than cutoff.
func (c *Cache) trimSubdir(subdir string, cutoff time.Time) {
	// Read all directory entries from subdir before removing
	// any files, in case removing files invalidates the file offset
	// in the directory scan. Also, ignore error from f.Readdirnames,
	// because we don't care about reporting the error and we still
	// want to process any entries found before the error.
	f, err := os.Open(subdir)
	if err != nil {
		return
	}
	names, _ := f.Readdirnames(-1)
	f.Close()

	for _, name := range names {
		// Remove only cache entries (xxxx-a and xxxx-d).
		if !strings.HasSuffix(name, "-a") && !strings.HasSuffix(name, "-d") {
			continue
		}
		entry := filepath.Join(subdir, name)
		// Stat and Remove are best-effort; a racing process may have
		// already removed or refreshed the entry.
		info, err := os.Stat(entry)
		if err == nil && info.ModTime().Before(cutoff) {
			os.Remove(entry)
		}
	}
}
// putIndexEntry adds an entry to the cache recording that executing the action
// with the given id produces an output with the given output id (hash) and size.
func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error {
	// Note: We expect that for one reason or another it may happen
	// that repeating an action produces a different output hash
	// (for example, if the output contains a time stamp or temp dir name).
	// While not ideal, this is also not a correctness problem, so we
	// don't make a big deal about it. In particular, we leave the action
	// cache entries writable specifically so that they can be overwritten.
	//
	// Setting GODEBUG=gocacheverify=1 does make a big deal:
	// in verify mode we are double-checking that the cache entries
	// are entirely reproducible. As just noted, this may be unrealistic
	// in some cases but the check is also useful for shaking out real bugs.
	entry := fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano())
	if verify && allowVerify {
		old, err := c.get(id)
		if err == nil && (old.OutputID != out || old.Size != size) {
			// panic to show stack trace, so we can see what code is generating this cache entry.
			// Bug fix: the old/new arguments were previously swapped — the entry being
			// written was printed under "old" and the cached entry under "new".
			msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), old.OutputID, old.Size, out, size)
			panic(msg)
		}
	}
	file := c.fileName(id, "a")

	// Copy file to cache directory.
	mode := os.O_WRONLY | os.O_CREATE
	f, err := os.OpenFile(file, mode, 0666)
	if err != nil {
		return err
	}
	_, err = f.WriteString(entry)
	if err == nil {
		// Truncate the file only *after* writing it.
		// (This should be a no-op, but truncate just in case of previous corruption.)
		//
		// This differs from ioutil.WriteFile, which truncates to 0 *before* writing
		// via os.O_TRUNC. Truncating only after writing ensures that a second write
		// of the same content to the same file is idempotent, and does not — even
		// temporarily! — undo the effect of the first write.
		err = f.Truncate(int64(len(entry)))
	}
	if closeErr := f.Close(); err == nil {
		err = closeErr
	}
	if err != nil {
		// TODO(bcmills): This Remove potentially races with another go command writing to file.
		// Can we eliminate it?
		os.Remove(file)
		return err
	}
	os.Chtimes(file, c.now(), c.now()) // mainly for tests

	return nil
}
// Put stores the given output in the cache as the output for the action ID.
// It may read file twice. The content of file must not change between the two passes.
func (c *Cache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
	return c.put(id, file, true)
}

// PutNoVerify is like Put but disables the verify check
// when GODEBUG=goverifycache=1 is set.
// It is meant for data that is OK to cache but that we expect to vary slightly from run to run,
// like test output containing times and the like.
func (c *Cache) PutNoVerify(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
	return c.put(id, file, false)
}

// put hashes file to compute its output ID, copies the data into the cache,
// and records the id→output mapping in the index.
func (c *Cache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) {
	// Compute output ID.
	h := sha256.New()
	if _, err := file.Seek(0, 0); err != nil {
		return OutputID{}, 0, err
	}
	size, err := io.Copy(h, file)
	if err != nil {
		return OutputID{}, 0, err
	}
	var out OutputID
	h.Sum(out[:0])

	// Copy to cached output file (if not already present).
	if err := c.copyFile(file, out, size); err != nil {
		return out, size, err
	}

	// Add to cache index.
	return out, size, c.putIndexEntry(id, out, size, allowVerify)
}

// PutBytes stores the given bytes in the cache as the output for the action ID.
func (c *Cache) PutBytes(id ActionID, data []byte) error {
	_, _, err := c.Put(id, bytes.NewReader(data))
	return err
}
// copyFile copies file into the cache, expecting it to have the given
// output ID and size, if that file is not present already.
func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error {
	name := c.fileName(out, "d")
	info, err := os.Stat(name)
	if err == nil && info.Size() == size {
		// Check hash.
		if f, err := os.Open(name); err == nil {
			h := sha256.New()
			io.Copy(h, f) // best effort; a short read just fails the hash comparison
			f.Close()
			var out2 OutputID
			h.Sum(out2[:0])
			if out == out2 {
				// Existing file already holds the desired content.
				return nil
			}
		}
		// Hash did not match. Fall through and rewrite file.
	}

	// Copy file to cache directory.
	mode := os.O_RDWR | os.O_CREATE
	if err == nil && info.Size() > size { // shouldn't happen but fix in case
		mode |= os.O_TRUNC
	}
	f, err := os.OpenFile(name, mode, 0666)
	if err != nil {
		return err
	}
	defer f.Close()
	if size == 0 {
		// File now exists with correct size.
		// Only one possible zero-length file, so contents are OK too.
		// Early return here makes sure there's a "last byte" for code below.
		return nil
	}

	// From here on, if any of the I/O writing the file fails,
	// we make a best-effort attempt to truncate the file f
	// before returning, to avoid leaving bad bytes in the file.

	// Copy file to f, but also into h to double-check hash.
	if _, err := file.Seek(0, 0); err != nil {
		f.Truncate(0)
		return err
	}
	h := sha256.New()
	w := io.MultiWriter(f, h)
	// Write all but the final byte; the last byte is held back until the
	// hash has been confirmed (see below).
	if _, err := io.CopyN(w, file, size-1); err != nil {
		f.Truncate(0)
		return err
	}
	// Check last byte before writing it; writing it will make the size match
	// what other processes expect to find and might cause them to start
	// using the file.
	buf := make([]byte, 1)
	if _, err := file.Read(buf); err != nil {
		f.Truncate(0)
		return err
	}
	h.Write(buf)
	sum := h.Sum(nil)
	if !bytes.Equal(sum, out[:]) {
		f.Truncate(0)
		return fmt.Errorf("file content changed underfoot")
	}

	// Commit cache file entry.
	if _, err := f.Write(buf); err != nil {
		f.Truncate(0)
		return err
	}
	if err := f.Close(); err != nil {
		// Data might not have been written,
		// but file may look like it is the right size.
		// To be extra careful, remove cached file.
		os.Remove(name)
		return err
	}
	os.Chtimes(name, c.now(), c.now()) // mainly for tests

	return nil
}

View File

@@ -0,0 +1,85 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"sync"
)
// Default returns the default cache to use.
// On error, defaultCache may still be non-nil (e.g. when
// STATICCHECK_CACHE is a relative path but the directory is usable);
// callers decide whether the error is fatal.
func Default() (*Cache, error) {
	defaultOnce.Do(initDefaultCache)
	// defaultDirErr is populated by DefaultDir (called from
	// initDefaultCache), not by initDefaultCache itself.
	return defaultCache, defaultDirErr
}

var (
	defaultOnce  sync.Once
	defaultCache *Cache
)

// cacheREADME is a message stored in a README in the cache directory.
// Because the cache lives outside the normal Go trees, we leave the
// README as a courtesy to explain where it came from.
const cacheREADME = `This directory holds cached build artifacts from staticcheck.
`

// initDefaultCache does the work of finding the default cache
// the first time Default is called.
// It terminates the process via log.Fatalf if the cache directory
// cannot be created or opened.
func initDefaultCache() {
	dir := DefaultDir()
	if err := os.MkdirAll(dir, 0777); err != nil {
		log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
	}
	if _, err := os.Stat(filepath.Join(dir, "README")); err != nil {
		// Best effort.
		ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666)
	}
	c, err := Open(dir)
	if err != nil {
		log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
	}
	defaultCache = c
}

var (
	defaultDirOnce sync.Once
	defaultDir     string
	defaultDirErr  error
)

// DefaultDir returns the effective STATICCHECK_CACHE setting.
func DefaultDir() string {
	// Save the result of the first call to DefaultDir for later use in
	// initDefaultCache. cmd/go/main.go explicitly sets GOCACHE so that
	// subprocesses will inherit it, but that means initDefaultCache can't
	// otherwise distinguish between an explicit "off" and a UserCacheDir error.
	defaultDirOnce.Do(func() {
		defaultDir = os.Getenv("STATICCHECK_CACHE")
		if filepath.IsAbs(defaultDir) {
			return
		}
		if defaultDir != "" {
			// Relative paths are rejected, but the (relative) value is
			// still returned; the error surfaces through Default().
			defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not an absolute path")
			return
		}
		// Compute default location.
		dir, err := os.UserCacheDir()
		if err != nil {
			defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not defined and %v", err)
			return
		}
		defaultDir = filepath.Join(dir, "staticcheck")
	})
	return defaultDir
}

View File

@@ -0,0 +1,163 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"bytes"
"crypto/sha256"
"fmt"
"hash"
"io"
"os"
"sync"
)
var debugHash = false // set when GODEBUG=gocachehash=1

// HashSize is the number of bytes in a hash.
const HashSize = 32

// A Hash provides access to the canonical hash function used to index the cache.
// The current implementation uses salted SHA256, but clients must not assume this.
type Hash struct {
	h    hash.Hash
	name string        // for debugging
	buf  *bytes.Buffer // for verify
}
// Subkey returns an action ID corresponding to mixing a parent
// action ID with a string description of the subkey.
func Subkey(parent ActionID, desc string) ActionID {
	h := sha256.New()
	// Domain-separate subkeys from other hashed content.
	h.Write([]byte("subkey:"))
	h.Write(parent[:])
	h.Write([]byte(desc))
	var out ActionID
	h.Sum(out[:0])
	if debugHash {
		fmt.Fprintf(os.Stderr, "HASH subkey %x %q = %x\n", parent, desc, out)
	}
	if verify {
		// Record the input so cache mismatches can be explained later
		// (see reverseHash).
		hashDebug.Lock()
		hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc)
		hashDebug.Unlock()
	}
	return out
}
// NewHash returns a new Hash.
// The caller is expected to Write data to it and then call Sum.
// The cache's salt is mixed in first, so hashes from differently
// salted caches never collide.
func (c *Cache) NewHash(name string) *Hash {
	h := &Hash{h: sha256.New(), name: name}
	if debugHash {
		fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name)
	}
	h.Write(c.salt)
	if verify {
		// Retain all written bytes for later debugging (see hashDebug).
		h.buf = new(bytes.Buffer)
	}
	return h
}

// Write writes data to the running hash.
func (h *Hash) Write(b []byte) (int, error) {
	if debugHash {
		fmt.Fprintf(os.Stderr, "HASH[%s]: %q\n", h.name, b)
	}
	if h.buf != nil {
		h.buf.Write(b)
	}
	return h.h.Write(b)
}

// Sum returns the hash of the data written previously.
func (h *Hash) Sum() [HashSize]byte {
	var out [HashSize]byte
	h.h.Sum(out[:0])
	if debugHash {
		fmt.Fprintf(os.Stderr, "HASH[%s]: %x\n", h.name, out)
	}
	if h.buf != nil {
		hashDebug.Lock()
		if hashDebug.m == nil {
			hashDebug.m = make(map[[HashSize]byte]string)
		}
		hashDebug.m[out] = h.buf.String()
		hashDebug.Unlock()
	}
	return out
}
// In GODEBUG=gocacheverify=1 mode,
// hashDebug holds the input to every computed hash ID,
// so that we can work backward from the ID involved in a
// cache entry mismatch to a description of what should be there.
var hashDebug struct {
	sync.Mutex
	m map[[HashSize]byte]string
}

// reverseHash returns the input used to compute the hash id.
// It returns "" if the input is unknown (i.e. verify mode is off
// or the id was never recorded).
func reverseHash(id [HashSize]byte) string {
	hashDebug.Lock()
	s := hashDebug.m[id]
	hashDebug.Unlock()
	return s
}

// hashFileCache memoizes FileHash results per file path.
var hashFileCache struct {
	sync.Mutex
	m map[string][HashSize]byte
}
// FileHash returns the hash of the named file.
// It caches repeated lookups for a given file,
// and the cache entry for a file can be initialized
// using SetFileHash.
// The hash used by FileHash is not the same as
// the hash used by NewHash.
func FileHash(file string) ([HashSize]byte, error) {
	hashFileCache.Lock()
	out, ok := hashFileCache.m[file]
	hashFileCache.Unlock()
	if ok {
		return out, nil
	}
	// Cache miss: hash the file contents. Note: unsalted SHA256.
	h := sha256.New()
	f, err := os.Open(file)
	if err != nil {
		if debugHash {
			fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err)
		}
		return [HashSize]byte{}, err
	}
	_, err = io.Copy(h, f)
	f.Close()
	if err != nil {
		if debugHash {
			fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err)
		}
		return [HashSize]byte{}, err
	}
	h.Sum(out[:0])
	if debugHash {
		fmt.Fprintf(os.Stderr, "HASH %s: %x\n", file, out)
	}
	SetFileHash(file, out)
	return out, nil
}
// SetFileHash sets the hash returned by FileHash for file.
func SetFileHash(file string, sum [HashSize]byte) {
	hashFileCache.Lock()
	defer hashFileCache.Unlock()
	// Lazily allocate the memoization map on first use.
	if hashFileCache.m == nil {
		hashFileCache.m = make(map[string][HashSize]byte)
	}
	hashFileCache.m[file] = sum
}

742
vendor/honnef.co/go/tools/lintcmd/cmd.go vendored Normal file
View File

@@ -0,0 +1,742 @@
// Package lintcmd implements the frontend of an analysis runner.
// It serves as the entry-point for the staticcheck command, and can also be used to implement custom linters that behave like staticcheck.
package lintcmd
import (
"bufio"
"encoding/gob"
"flag"
"fmt"
"go/token"
"io"
"log"
"os"
"path/filepath"
"reflect"
"runtime"
"runtime/pprof"
"runtime/trace"
"sort"
"strings"
"sync"
"time"
"honnef.co/go/tools/analysis/lint"
"honnef.co/go/tools/config"
"honnef.co/go/tools/go/loader"
"honnef.co/go/tools/lintcmd/version"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/buildutil"
)
// BuildConfig describes a single build configuration: a name plus the
// environment variables and build flags to apply when loading packages.
type BuildConfig struct {
	Name  string
	Envs  []string
	Flags []string
}

// Command represents a linter command line tool.
type Command struct {
	name string
	// analyzers maps analyzer name to analyzer.
	analyzers map[string]*lint.Analyzer
	// version and machineVersion default to "devel"; see SetVersion.
	version        string
	machineVersion string

	flags struct {
		fs *flag.FlagSet

		tags        string
		tests       bool
		showIgnored bool
		formatter   string

		// mutually exclusive mode flags
		explain      string
		printVersion bool
		listChecks   bool
		merge        bool

		matrix bool

		debugCpuprofile       string
		debugMemprofile       string
		debugVersion          bool
		debugNoCompileErrors  bool
		debugMeasureAnalyzers string
		debugTrace            string

		checks    list
		fail      list
		goVersion versionFlag
	}
}
// NewCommand returns a new Command.
func NewCommand(name string) *Command {
	cmd := &Command{
		name:           name,
		analyzers:      map[string]*lint.Analyzer{},
		version:        "devel",
		machineVersion: "devel",
	}
	cmd.initFlagSet(name)
	return cmd
}

// SetVersion sets the command's version.
// It is divided into a human part and a machine part.
// For example, Staticcheck 2020.2.1 had the human version "2020.2.1" and the machine version "v0.1.1".
// If you only use Semver, you can set both parts to the same value.
//
// Calling this method is optional. Both versions default to "devel", and we'll attempt to deduce more version information from the Go module.
func (cmd *Command) SetVersion(human, machine string) {
	cmd.version = human
	cmd.machineVersion = machine
}

// FlagSet returns the command's flag set.
// This can be used to add additional command line arguments.
func (cmd *Command) FlagSet() *flag.FlagSet {
	return cmd.flags.fs
}
// AddAnalyzers adds analyzers to the command.
// These are lint.Analyzer analyzers, which wrap analysis.Analyzer analyzers, bundling them with structured documentation.
//
// To add analysis.Analyzer analyzers without providing structured documentation, use AddBareAnalyzers.
func (cmd *Command) AddAnalyzers(as ...*lint.Analyzer) {
	for _, a := range as {
		cmd.analyzers[a.Analyzer.Name] = a
	}
}

// AddBareAnalyzers adds bare analyzers to the command.
// Structured documentation is synthesized from the analyzer's Doc
// string: the text up to the first blank line becomes the title, the
// remainder becomes the body.
func (cmd *Command) AddBareAnalyzers(as ...*analysis.Analyzer) {
	for _, a := range as {
		var title, text string
		if idx := strings.Index(a.Doc, "\n\n"); idx > -1 {
			title = a.Doc[:idx]
			text = a.Doc[idx+2:]
		}

		doc := &lint.Documentation{
			Title:    title,
			Text:     text,
			Severity: lint.SeverityWarning,
		}

		cmd.analyzers[a.Name] = &lint.Analyzer{
			Doc:      doc,
			Analyzer: a,
		}
	}
}
// initFlagSet registers all command line flags on a fresh FlagSet and
// stores it in cmd.flags.fs. Called once from NewCommand.
func (cmd *Command) initFlagSet(name string) {
	flags := flag.NewFlagSet("", flag.ExitOnError)
	cmd.flags.fs = flags
	flags.Usage = usage(name, flags)

	flags.StringVar(&cmd.flags.tags, "tags", "", "List of `build tags`")
	flags.BoolVar(&cmd.flags.tests, "tests", true, "Include tests")
	flags.BoolVar(&cmd.flags.printVersion, "version", false, "Print version and exit")
	flags.BoolVar(&cmd.flags.showIgnored, "show-ignored", false, "Don't filter ignored diagnostics")
	flags.StringVar(&cmd.flags.formatter, "f", "text", "Output `format` (valid choices are 'stylish', 'text' and 'json')")
	flags.StringVar(&cmd.flags.explain, "explain", "", "Print description of `check`")
	flags.BoolVar(&cmd.flags.listChecks, "list-checks", false, "List all available checks")
	flags.BoolVar(&cmd.flags.merge, "merge", false, "Merge results of multiple Staticcheck runs")
	flags.BoolVar(&cmd.flags.matrix, "matrix", false, "Read a build config matrix from stdin")

	// Debug flags are hidden from the default usage output; see printDefaults.
	flags.StringVar(&cmd.flags.debugCpuprofile, "debug.cpuprofile", "", "Write CPU profile to `file`")
	flags.StringVar(&cmd.flags.debugMemprofile, "debug.memprofile", "", "Write memory profile to `file`")
	flags.BoolVar(&cmd.flags.debugVersion, "debug.version", false, "Print detailed version information about this program")
	flags.BoolVar(&cmd.flags.debugNoCompileErrors, "debug.no-compile-errors", false, "Don't print compile errors")
	flags.StringVar(&cmd.flags.debugMeasureAnalyzers, "debug.measure-analyzers", "", "Write analysis measurements to `file`. `file` will be opened for appending if it already exists.")
	flags.StringVar(&cmd.flags.debugTrace, "debug.trace", "", "Write trace to `file`")

	cmd.flags.checks = list{"inherit"}
	cmd.flags.fail = list{"all"}
	cmd.flags.goVersion = versionFlag("module")
	flags.Var(&cmd.flags.checks, "checks", "Comma-separated list of `checks` to enable.")
	flags.Var(&cmd.flags.fail, "fail", "Comma-separated list of `checks` that can cause a non-zero exit status.")
	flags.Var(&cmd.flags.goVersion, "go", "Target Go `version` in the format '1.x', or the literal 'module' to use the module's Go version")
}
// list is a flag.Value holding a comma-separated list of strings.
type list []string

// String renders the list as a quoted, comma-joined string, e.g. `"a,b"`.
func (list *list) String() string {
	return `"` + strings.Join(*list, ",") + `"`
}

// Set parses a comma-separated flag value, trimming whitespace around
// each element. An empty string resets the list to nil.
func (list *list) Set(s string) error {
	if s == "" {
		*list = nil
		return nil
	}

	parts := strings.Split(s, ",")
	out := make([]string, 0, len(parts))
	for _, part := range parts {
		out = append(out, strings.TrimSpace(part))
	}
	*list = out
	return nil
}
// versionFlag is a flag.Value for the -go flag. It accepts either the
// literal "module" or a Go version accepted by lint.VersionFlag.
type versionFlag string

func (v *versionFlag) String() string {
	return fmt.Sprintf("%q", string(*v))
}

func (v *versionFlag) Set(s string) error {
	if s == "module" {
		*v = "module"
	} else {
		// Delegate validation to lint.VersionFlag; only store the raw
		// string if it parses.
		var vf lint.VersionFlag
		if err := vf.Set(s); err != nil {
			return err
		}
		*v = versionFlag(s)
	}
	return nil
}
// ParseFlags parses command line flags.
// It must be called before calling Run.
// After calling ParseFlags, the values of flags can be accessed.
//
// Example:
//
// cmd.ParseFlags(os.Args[1:])
func (cmd *Command) ParseFlags(args []string) {
	// The flag set uses flag.ExitOnError, so parse errors terminate the
	// process; no error is returned here.
	cmd.flags.fs.Parse(args)
}

// diagnosticDescriptor represents the uniquiely identifying information of diagnostics.
type diagnosticDescriptor struct {
	Position token.Position
	End      token.Position
	Category string
	Message  string
}

// descriptor projects the diagnostic down to the fields that identify
// it across runs (used for deduplication and merging).
func (diag diagnostic) descriptor() diagnosticDescriptor {
	return diagnosticDescriptor{
		Position: diag.Position,
		End:      diag.End,
		Category: diag.Category,
		Message:  diag.Message,
	}
}
// run is the in-memory form of one staticcheck invocation's results,
// indexed for fast lookup during merging.
type run struct {
	checkedFiles map[string]struct{}
	diagnostics  map[diagnosticDescriptor]diagnostic
}

// runFromLintResult converts a LintResult into a run.
func runFromLintResult(res LintResult) run {
	out := run{
		checkedFiles: map[string]struct{}{},
		diagnostics:  map[diagnosticDescriptor]diagnostic{},
	}

	for _, cf := range res.CheckedFiles {
		out.checkedFiles[cf] = struct{}{}
	}
	for _, diag := range res.Diagnostics {
		out.diagnostics[diag.descriptor()] = diag
	}
	return out
}

// decodeGob decodes a concatenation of gob streams, one LintResult per
// stream, as produced by '-f binary'.
func decodeGob(br io.ByteReader) ([]run, error) {
	var runs []run
	for {
		// A fresh Decoder per iteration is deliberate: each LintResult
		// was written by its own Encoder and therefore carries its own
		// type definitions, which a single reused Decoder would reject.
		var res LintResult
		if err := gob.NewDecoder(br.(io.Reader)).Decode(&res); err != nil {
			if err == io.EOF {
				break
			} else {
				return nil, err
			}
		}
		runs = append(runs, runFromLintResult(res))
	}
	return runs, nil
}
// Run runs all registered analyzers and reports their findings.
// It always calls os.Exit and does not return.
//
// The mutually exclusive mode flags (version, list-checks, explain,
// merge, ...) are dispatched in the switch below; the default case
// performs the actual lint run over one or more build configurations.
func (cmd *Command) Run() {
	// Optional instrumentation: append per-analyzer timings to a file.
	var measureAnalyzers func(analysis *analysis.Analyzer, pkg *loader.PackageSpec, d time.Duration)
	if path := cmd.flags.debugMeasureAnalyzers; path != "" {
		f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
		if err != nil {
			log.Fatal(err)
		}
		mu := &sync.Mutex{}
		measureAnalyzers = func(analysis *analysis.Analyzer, pkg *loader.PackageSpec, d time.Duration) {
			mu.Lock()
			defer mu.Unlock()
			// FIXME(dh): print pkg.ID
			if _, err := fmt.Fprintf(f, "%s\t%s\t%d\n", analysis.Name, pkg, d.Nanoseconds()); err != nil {
				log.Println("error writing analysis measurements:", err)
			}
		}
	}

	// Profiling/tracing is stopped in cmd.exit.
	if path := cmd.flags.debugCpuprofile; path != "" {
		f, err := os.Create(path)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
	}
	if path := cmd.flags.debugTrace; path != "" {
		f, err := os.Create(path)
		if err != nil {
			log.Fatal(err)
		}
		trace.Start(f)
	}

	// Build the default check list: everything except analyzers marked
	// NonDefault, which are explicitly subtracted.
	defaultChecks := []string{"all"}
	cs := make([]*lint.Analyzer, 0, len(cmd.analyzers))
	for _, a := range cmd.analyzers {
		cs = append(cs, a)
		if a.Doc.NonDefault {
			defaultChecks = append(defaultChecks, "-"+a.Analyzer.Name)
		}
	}
	config.DefaultConfig.Checks = defaultChecks

	switch {
	case cmd.flags.debugVersion:
		version.Verbose(cmd.version, cmd.machineVersion)
		cmd.exit(0)
	case cmd.flags.listChecks:
		sort.Slice(cs, func(i, j int) bool {
			return cs[i].Analyzer.Name < cs[j].Analyzer.Name
		})
		for _, c := range cs {
			var title string
			if c.Doc != nil {
				title = c.Doc.Title
			}
			fmt.Printf("%s %s\n", c.Analyzer.Name, title)
		}
		cmd.exit(0)
	case cmd.flags.printVersion:
		version.Print(cmd.version, cmd.machineVersion)
		cmd.exit(0)
	case cmd.flags.explain != "":
		explain := cmd.flags.explain
		check, ok := cmd.analyzers[explain]
		if !ok {
			fmt.Fprintln(os.Stderr, "Couldn't find check", explain)
			cmd.exit(1)
		}
		if check.Analyzer.Doc == "" {
			fmt.Fprintln(os.Stderr, explain, "has no documentation")
			cmd.exit(1)
		}
		fmt.Println(check.Doc)
		fmt.Println("Online documentation\n    https://staticcheck.io/docs/checks#" + check.Analyzer.Name)
		cmd.exit(0)
	case cmd.flags.merge:
		// Merge mode: read gob-encoded results from stdin or from the
		// named files, merge, and print.
		var runs []run
		if len(cmd.flags.fs.Args()) == 0 {
			var err error
			runs, err = decodeGob(bufio.NewReader(os.Stdin))
			if err != nil {
				fmt.Fprintln(os.Stderr, fmt.Errorf("couldn't parse stdin: %s", err))
				cmd.exit(1)
			}
		} else {
			for _, path := range cmd.flags.fs.Args() {
				someRuns, err := func(path string) ([]run, error) {
					// Closure so that the deferred Close runs per file.
					f, err := os.Open(path)
					if err != nil {
						return nil, err
					}
					defer f.Close()
					br := bufio.NewReader(f)
					return decodeGob(br)
				}(path)
				if err != nil {
					fmt.Fprintln(os.Stderr, fmt.Errorf("couldn't parse file %s: %s", path, err))
					cmd.exit(1)
				}
				runs = append(runs, someRuns...)
			}
		}

		relevantDiagnostics := mergeRuns(runs)
		cmd.printDiagnostics(cs, relevantDiagnostics)
	default:
		switch cmd.flags.formatter {
		case "text", "stylish", "json", "sarif", "binary", "null":
		default:
			fmt.Fprintf(os.Stderr, "unsupported output format %q\n", cmd.flags.formatter)
			cmd.exit(2)
		}

		// Determine the set of build configurations: either a matrix
		// read from stdin, or a single config derived from -tags.
		var bconfs []BuildConfig
		if cmd.flags.matrix {
			if cmd.flags.tags != "" {
				fmt.Fprintln(os.Stderr, "cannot use -matrix and -tags together")
				cmd.exit(2)
			}

			var err error
			bconfs, err = parseBuildConfigs(os.Stdin)
			if err != nil {
				if perr, ok := err.(parseBuildConfigError); ok {
					fmt.Fprintf(os.Stderr, "<stdin>:%d couldn't parse build matrix: %s\n", perr.line, perr.err)
				} else {
					fmt.Fprintln(os.Stderr, err)
				}
				os.Exit(2)
			}
		} else {
			bc := BuildConfig{}
			if cmd.flags.tags != "" {
				// Validate that the tags argument is well-formed. go/packages
				// doesn't detect malformed build flags and returns unhelpful
				// errors.
				tf := buildutil.TagsFlag{}
				if err := tf.Set(cmd.flags.tags); err != nil {
					fmt.Fprintln(os.Stderr, fmt.Errorf("invalid value %q for flag -tags: %s", cmd.flags.tags, err))
					cmd.exit(1)
				}

				bc.Flags = []string{"-tags", cmd.flags.tags}
			}
			bconfs = append(bconfs, bc)
		}

		var runs []run
		for _, bconf := range bconfs {
			res, err := doLint(cs, cmd.flags.fs.Args(), &options{
				BuildConfig: bconf,
				LintTests:   cmd.flags.tests,
				GoVersion:   string(cmd.flags.goVersion),
				Config: config.Config{
					Checks: cmd.flags.checks,
				},
				PrintAnalyzerMeasurement: measureAnalyzers,
			})
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				cmd.exit(1)
			}

			for _, w := range res.Warnings {
				fmt.Fprintln(os.Stderr, "warning:", w)
			}

			cwd, err := os.Getwd()
			if err != nil {
				cwd = ""
			}
			relPath := func(s string) string {
				if cwd == "" {
					return filepath.ToSlash(s)
				}
				out, err := filepath.Rel(cwd, s)
				if err != nil {
					return filepath.ToSlash(s)
				}
				return filepath.ToSlash(out)
			}

			if cmd.flags.formatter == "binary" {
				for i, s := range res.CheckedFiles {
					res.CheckedFiles[i] = relPath(s)
				}
				for i := range res.Diagnostics {
					// We turn all paths into relative, /-separated paths. This is to make -merge work correctly when
					// merging runs from different OSs, with different absolute paths.
					//
					// We zero out Offset, because checkouts of code on different OSs may have different kinds of
					// newlines and thus different offsets. We don't ever make use of the Offset, anyway. Line and
					// column numbers are precomputed.

					d := &res.Diagnostics[i]
					d.Position.Filename = relPath(d.Position.Filename)
					d.Position.Offset = 0
					d.End.Filename = relPath(d.End.Filename)
					d.End.Offset = 0
					for j := range d.Related {
						r := &d.Related[j]
						r.Position.Filename = relPath(r.Position.Filename)
						r.Position.Offset = 0
						r.End.Filename = relPath(r.End.Filename)
						r.End.Offset = 0
					}
				}
				err := gob.NewEncoder(os.Stdout).Encode(res)
				if err != nil {
					fmt.Fprintf(os.Stderr, "failed writing output: %s\n", err)
					cmd.exit(2)
				}
			} else {
				runs = append(runs, runFromLintResult(res))
			}
		}

		if cmd.flags.formatter != "binary" {
			diags := mergeRuns(runs)
			cmd.printDiagnostics(cs, diags)
		}
	}
}
// mergeRuns combines the diagnostics of multiple runs.
//
// Diagnostics with MergeIfAny are always kept. Diagnostics with
// MergeIfAll are kept only if every run that checked the diagnostic's
// file also reported the same diagnostic; runs that never looked at
// the file don't veto it.
func mergeRuns(runs []run) []diagnostic {
	var relevantDiagnostics []diagnostic
	for _, r := range runs {
		for _, diag := range r.diagnostics {
			switch diag.MergeIf {
			case lint.MergeIfAny:
				relevantDiagnostics = append(relevantDiagnostics, diag)
			case lint.MergeIfAll:
				doPrint := true
				for _, r := range runs {
					if _, ok := r.checkedFiles[diag.Position.Filename]; ok {
						if _, ok := r.diagnostics[diag.descriptor()]; !ok {
							doPrint = false
						}
					}
				}
				if doPrint {
					relevantDiagnostics = append(relevantDiagnostics, diag)
				}
			}
		}
	}
	return relevantDiagnostics
}
// exit flushes any active profiling/tracing output and terminates the
// process with the given status code. It does not return.
func (cmd *Command) exit(code int) {
	if cmd.flags.debugCpuprofile != "" {
		pprof.StopCPUProfile()
	}
	if path := cmd.flags.debugMemprofile; path != "" {
		f, err := os.Create(path)
		if err != nil {
			panic(err)
		}
		// Force a GC so the heap profile reflects live data only.
		runtime.GC()
		pprof.WriteHeapProfile(f)
	}
	if cmd.flags.debugTrace != "" {
		trace.Stop()
	}
	os.Exit(code)
}
// printDiagnostics deduplicates, formats and prints diagnostics, then
// terminates the process via cmd.exit. The exit code is 1 if any
// diagnostic belongs to a category listed in -fail (or is a
// staticcheck/compile error), 0 otherwise — except for SARIF output,
// where findings are considered success.
func (cmd *Command) printDiagnostics(cs []*lint.Analyzer, diagnostics []diagnostic) {
	if len(diagnostics) > 1 {
		// Sort into a deterministic order so duplicates are adjacent.
		sort.Slice(diagnostics, func(i, j int) bool {
			di := diagnostics[i]
			dj := diagnostics[j]
			pi := di.Position
			pj := dj.Position

			if pi.Filename != pj.Filename {
				return pi.Filename < pj.Filename
			}
			if pi.Line != pj.Line {
				return pi.Line < pj.Line
			}
			if pi.Column != pj.Column {
				return pi.Column < pj.Column
			}
			if di.Message != dj.Message {
				return di.Message < dj.Message
			}
			if di.BuildName != dj.BuildName {
				return di.BuildName < dj.BuildName
			}
			return di.Category < dj.Category
		})

		// filtered holds the deduplicated diagnostics; builds[i] is the
		// set of build config names that produced filtered[i].
		filtered := []diagnostic{
			diagnostics[0],
		}
		builds := []map[string]struct{}{
			{diagnostics[0].BuildName: {}},
		}
		for _, diag := range diagnostics[1:] {
			// We may encounter duplicate diagnostics because one file
			// can be part of many packages, and because multiple
			// build configurations may check the same files.
			if !filtered[len(filtered)-1].equal(diag) {
				if filtered[len(filtered)-1].descriptor() == diag.descriptor() {
					// Diagnostics only differ in build name, track new name
					builds[len(filtered)-1][diag.BuildName] = struct{}{}
				} else {
					filtered = append(filtered, diag)
					builds = append(builds, map[string]struct{}{})
					builds[len(filtered)-1][diag.BuildName] = struct{}{}
				}
			}
		}

		// Collapse the per-diagnostic build sets into a sorted,
		// comma-separated BuildName.
		var names []string
		for i := range filtered {
			names = names[:0]
			for k := range builds[i] {
				names = append(names, k)
			}
			sort.Strings(names)
			filtered[i].BuildName = strings.Join(names, ",")
		}
		diagnostics = filtered
	}

	var f formatter
	switch cmd.flags.formatter {
	case "text":
		f = textFormatter{W: os.Stdout}
	case "stylish":
		f = &stylishFormatter{W: os.Stdout}
	case "json":
		f = jsonFormatter{W: os.Stdout}
	case "sarif":
		f = &sarifFormatter{
			driverName:    cmd.name,
			driverVersion: cmd.version,
		}
		if cmd.name == "staticcheck" {
			f.(*sarifFormatter).driverName = "Staticcheck"
			f.(*sarifFormatter).driverWebsite = "https://staticcheck.io"
		}
	case "binary":
		fmt.Fprintln(os.Stderr, "'-f binary' not supported in this context")
		cmd.exit(2)
	case "null":
		f = nullFormatter{}
	default:
		fmt.Fprintf(os.Stderr, "unsupported output format %q\n", cmd.flags.formatter)
		cmd.exit(2)
	}

	// Categories in -fail (plus hard errors) make the process exit
	// non-zero if matching diagnostics exist.
	fail := cmd.flags.fail
	analyzerNames := make([]string, len(cs))
	for i, a := range cs {
		analyzerNames[i] = a.Analyzer.Name
	}
	shouldExit := filterAnalyzerNames(analyzerNames, fail)
	shouldExit["staticcheck"] = true
	shouldExit["compile"] = true

	var (
		numErrors   int
		numWarnings int
		numIgnored  int
	)
	notIgnored := make([]diagnostic, 0, len(diagnostics))
	for _, diag := range diagnostics {
		if diag.Category == "compile" && cmd.flags.debugNoCompileErrors {
			continue
		}
		if diag.Severity == severityIgnored && !cmd.flags.showIgnored {
			numIgnored++
			continue
		}
		if shouldExit[diag.Category] {
			numErrors++
		} else {
			diag.Severity = severityWarning
			numWarnings++
		}
		notIgnored = append(notIgnored, diag)
	}

	f.Format(cs, notIgnored)
	if f, ok := f.(statter); ok {
		f.Stats(len(diagnostics), numErrors, numWarnings, numIgnored)
	}

	if numErrors > 0 {
		if _, ok := f.(*sarifFormatter); ok {
			// When emitting SARIF, finding errors is considered success.
			cmd.exit(0)
		} else {
			cmd.exit(1)
		}
	}
	cmd.exit(0)
}
// usage returns a flag.FlagSet.Usage function that prints the
// command's help text (excluding debug flags) to stderr.
func usage(name string, fs *flag.FlagSet) func() {
	return func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [flags] [packages]\n", name)
		fmt.Fprintln(os.Stderr)
		fmt.Fprintln(os.Stderr, "Flags:")
		printDefaults(fs)

		fmt.Fprintln(os.Stderr)
		fmt.Fprintln(os.Stderr, "For help about specifying packages, see 'go help packages'")
	}
}
// isZeroValue determines whether the string represents the zero
// value for a flag.
//
// this function has been copied from the Go standard library's 'flag' package.
func isZeroValue(f *flag.Flag, value string) bool {
// Build a zero value of the flag's Value type, and see if the
// result of calling its String method equals the value passed in.
// This works unless the Value type is itself an interface type.
typ := reflect.TypeOf(f.Value)
var z reflect.Value
if typ.Kind() == reflect.Ptr {
z = reflect.New(typ.Elem())
} else {
z = reflect.Zero(typ)
}
return value == z.Interface().(flag.Value).String()
}
// this function has been copied from the Go standard library's 'flag' package and modified to skip debug flags.
func printDefaults(fs *flag.FlagSet) {
	fs.VisitAll(func(f *flag.Flag) {
		// Don't print debug flags
		if strings.HasPrefix(f.Name, "debug.") {
			return
		}

		var b strings.Builder
		fmt.Fprintf(&b, "  -%s", f.Name) // Two spaces before -; see next two comments.
		name, usage := flag.UnquoteUsage(f)
		if len(name) > 0 {
			b.WriteString(" ")
			b.WriteString(name)
		}
		// Boolean flags of one ASCII letter are so common we
		// treat them specially, putting their usage on the same line.
		if b.Len() <= 4 { // space, space, '-', 'x'.
			b.WriteString("\t")
		} else {
			// Four spaces before the tab triggers good alignment
			// for both 4- and 8-space tab stops.
			b.WriteString("\n    \t")
		}
		b.WriteString(strings.ReplaceAll(usage, "\n", "\n    \t"))

		// Show the default value unless it is the type's zero value.
		if !isZeroValue(f, f.DefValue) {
			if T := reflect.TypeOf(f.Value); T.Name() == "*stringValue" && T.PkgPath() == "flag" {
				// put quotes on the value
				fmt.Fprintf(&b, " (default %q)", f.DefValue)
			} else {
				fmt.Fprintf(&b, " (default %v)", f.DefValue)
			}
		}
		fmt.Fprint(fs.Output(), b.String(), "\n")
	})
}

View File

@@ -0,0 +1,105 @@
package lintcmd
import (
"bufio"
"errors"
"fmt"
"io"
"strings"
"unicode"
)
// parseBuildConfigError wraps an error encountered while parsing the
// build config matrix, carrying the 1-based input line it occurred on.
type parseBuildConfigError struct {
	line int // 1-based line number in the input
	err  error
}

func (err parseBuildConfigError) Error() string { return err.err.Error() }

// parseBuildConfigs reads a build config matrix from r, one
// configuration per line. Blank lines are skipped. On a malformed
// line it returns a parseBuildConfigError carrying that line's number.
//
// Fixes relative to the previous version: (1) a final line without a
// trailing newline is no longer dropped — bufio's ReadString returns
// the partial line together with io.EOF, and the old code broke out of
// the loop without processing it; (2) reported line numbers now count
// every input line, not just successfully parsed configs, so blank
// lines no longer skew the position in error messages.
func parseBuildConfigs(r io.Reader) ([]BuildConfig, error) {
	var builds []BuildConfig
	br := bufio.NewReader(r)
	for lineNo := 1; ; lineNo++ {
		line, err := br.ReadString('\n')
		eof := err == io.EOF
		if err != nil && !eof {
			return nil, err
		}
		line = strings.TrimSpace(line)
		if line != "" {
			name, envs, flags, perr := parseBuildConfig(line)
			if perr != nil {
				return nil, parseBuildConfigError{line: lineNo, err: perr}
			}
			builds = append(builds, BuildConfig{
				Name:  name,
				Envs:  envs,
				Flags: flags,
			})
		}
		if eof {
			break
		}
	}
	return builds, nil
}
func parseBuildConfig(line string) (name string, envs []string, flags []string, err error) {
if line == "" {
return "", nil, nil, errors.New("couldn't parse empty build config")
}
if strings.Index(line, ":") == len(line)-1 {
name = line[:len(line)-1]
} else {
idx := strings.Index(line, ": ")
if idx == -1 {
return name, envs, flags, errors.New("missing build name")
}
name = line[:idx]
var buf []rune
var inQuote bool
args := &envs
for _, r := range strings.TrimSpace(line[idx+2:]) {
switch r {
case ' ':
if inQuote {
buf = append(buf, r)
} else if len(buf) != 0 {
if buf[0] == '-' {
args = &flags
}
*args = append(*args, string(buf))
buf = buf[:0]
}
case '"':
inQuote = !inQuote
default:
buf = append(buf, r)
}
}
if len(buf) > 0 {
if inQuote {
return "", nil, nil, errors.New("unterminated quoted string")
}
if buf[0] == '-' {
args = &flags
}
*args = append(*args, string(buf))
}
}
for _, r := range name {
if !(r == '_' || unicode.IsLetter(r) || unicode.IsNumber(r)) {
return "", nil, nil, fmt.Errorf("invalid build name %q", name)
}
}
return name, envs, flags, nil
}

View File

@@ -0,0 +1,55 @@
package lintcmd
import (
"strings"
"honnef.co/go/tools/lintcmd/runner"
)
// parseDirectives converts //lint:ignore and //lint:file-ignore
// directives into ignore rules. Malformed directives (fewer than two
// arguments, i.e. missing the mandatory reason) are reported as
// compile-category diagnostics instead. Unknown directive commands are
// silently skipped.
func parseDirectives(dirs []runner.SerializedDirective) ([]ignore, []diagnostic) {
	var ignores []ignore
	var diagnostics []diagnostic
	for _, dir := range dirs {
		cmd := dir.Command
		args := dir.Arguments
		switch cmd {
		case "ignore", "file-ignore":
			if len(args) < 2 {
				p := diagnostic{
					Diagnostic: runner.Diagnostic{
						Position: dir.NodePosition,
						Message:  "malformed linter directive; missing the required reason field?",
						Category: "compile",
					},
					Severity: severityError,
				}
				diagnostics = append(diagnostics, p)
				continue
			}
		default:
			// unknown directive, ignore
			continue
		}
		// args[0] is the comma-separated check list; args[1:] is the
		// free-form reason, which is not interpreted.
		checks := strings.Split(args[0], ",")
		pos := dir.NodePosition
		var ig ignore
		switch cmd {
		case "ignore":
			// Scoped to the directive's line in the file.
			ig = &lineIgnore{
				File:   pos.Filename,
				Line:   pos.Line,
				Checks: checks,
				Pos:    dir.DirectivePosition,
			}
		case "file-ignore":
			// Scoped to the whole file.
			ig = &fileIgnore{
				File:   pos.Filename,
				Checks: checks,
			}
		}
		ignores = append(ignores, ig)
	}
	return ignores, diagnostics
}

View File

@@ -0,0 +1,161 @@
package lintcmd
import (
"encoding/json"
"fmt"
"go/token"
"io"
"os"
"path/filepath"
"text/tabwriter"
"honnef.co/go/tools/analysis/lint"
)
func shortPath(path string) string {
cwd, err := os.Getwd()
if err != nil {
return path
}
if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) {
return rel
}
return path
}
func relativePositionString(pos token.Position) string {
s := shortPath(pos.Filename)
if pos.IsValid() {
if s != "" {
s += ":"
}
s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
}
if s == "" {
s = "-"
}
return s
}
// statter is optionally implemented by formatters that want a summary
// line after all diagnostics have been formatted.
type statter interface {
	Stats(total, errors, warnings, ignored int)
}

// formatter renders a set of diagnostics.
type formatter interface {
	Format(checks []*lint.Analyzer, diagnostics []diagnostic)
}

// textFormatter prints one "file:line:col: message" line per
// diagnostic, with related information indented beneath it.
type textFormatter struct {
	W io.Writer
}

func (o textFormatter) Format(_ []*lint.Analyzer, ps []diagnostic) {
	for _, p := range ps {
		fmt.Fprintf(o.W, "%s: %s\n", relativePositionString(p.Position), p.String())
		for _, r := range p.Related {
			fmt.Fprintf(o.W, "\t%s: %s\n", relativePositionString(r.Position), r.Message)
		}
	}
}

// nullFormatter discards all diagnostics (used by '-f null').
type nullFormatter struct{}

func (nullFormatter) Format([]*lint.Analyzer, []diagnostic) {}
// jsonFormatter emits one JSON object per diagnostic (newline
// delimited, not a JSON array).
type jsonFormatter struct {
	W io.Writer
}

func (o jsonFormatter) Format(_ []*lint.Analyzer, ps []diagnostic) {
	type location struct {
		File   string `json:"file"`
		Line   int    `json:"line"`
		Column int    `json:"column"`
	}
	type related struct {
		Location location `json:"location"`
		End      location `json:"end"`
		Message  string   `json:"message"`
	}
	enc := json.NewEncoder(o.W)
	for _, p := range ps {
		jp := struct {
			Code     string    `json:"code"`
			Severity string    `json:"severity,omitempty"`
			Location location  `json:"location"`
			End      location  `json:"end"`
			Message  string    `json:"message"`
			Related  []related `json:"related,omitempty"`
		}{
			Code:     p.Category,
			Severity: p.Severity.String(),
			Location: location{
				File:   p.Position.Filename,
				Line:   p.Position.Line,
				Column: p.Position.Column,
			},
			End: location{
				File:   p.End.Filename,
				Line:   p.End.Line,
				Column: p.End.Column,
			},
			Message: p.Message,
		}
		for _, r := range p.Related {
			jp.Related = append(jp.Related, related{
				Location: location{
					File:   r.Position.Filename,
					Line:   r.Position.Line,
					Column: r.Position.Column,
				},
				End: location{
					File:   r.End.Filename,
					Line:   r.End.Line,
					Column: r.End.Column,
				},
				Message: r.Message,
			})
		}
		// Encode errors are deliberately ignored; output is best effort.
		_ = enc.Encode(jp)
	}
}
// stylishFormatter groups diagnostics by file, rendering a header line
// per file and a tab-aligned table of positions and messages beneath
// it. Diagnostics are assumed to arrive sorted by file.
type stylishFormatter struct {
	W        io.Writer
	prevFile string            // file of the previous diagnostic, to detect group boundaries
	tw       *tabwriter.Writer // current file's alignment writer; flushed on file change and in Stats
}

func (o *stylishFormatter) Format(_ []*lint.Analyzer, ps []diagnostic) {
	for _, p := range ps {
		pos := p.Position
		if pos.Filename == "" {
			pos.Filename = "-"
		}

		if pos.Filename != o.prevFile {
			if o.prevFile != "" {
				o.tw.Flush()
				fmt.Fprintln(o.W)
			}
			fmt.Fprintln(o.W, pos.Filename)
			o.prevFile = pos.Filename
			o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0)
		}
		fmt.Fprintf(o.tw, "  (%d, %d)\t%s\t%s\n", pos.Line, pos.Column, p.Category, p.Message)
		for _, r := range p.Related {
			fmt.Fprintf(o.tw, "    (%d, %d)\t\t  %s\n", r.Position.Line, r.Position.Column, r.Message)
		}
	}
}

// Stats flushes any pending table output and prints a summary line.
func (o *stylishFormatter) Stats(total, errors, warnings, ignored int) {
	if o.tw != nil {
		o.tw.Flush()
		fmt.Fprintln(o.W)
	}
	fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings, %d ignored)\n",
		total, errors, warnings, ignored)
}

View File

@@ -0,0 +1,577 @@
package lintcmd
import (
"crypto/sha256"
"fmt"
"go/build"
"go/token"
"io"
"os"
"os/signal"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"unicode"
"honnef.co/go/tools/analysis/lint"
"honnef.co/go/tools/config"
"honnef.co/go/tools/go/buildid"
"honnef.co/go/tools/go/loader"
"honnef.co/go/tools/lintcmd/cache"
"honnef.co/go/tools/lintcmd/runner"
"honnef.co/go/tools/unused"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
)
// A linter lints Go source code.
type linter struct {
	Analyzers map[string]*lint.Analyzer // analyzers to run, keyed by check name
	Runner *runner.Runner
}
// computeSalt derives a value identifying the running executable, for
// salting the result cache: preferably the binary's build ID, otherwise
// a SHA-256 hash of the binary's contents.
func computeSalt() ([]byte, error) {
	exe, err := os.Executable()
	if err != nil {
		return nil, err
	}
	if id, err := buildid.ReadFile(exe); err == nil {
		return []byte(id), nil
	}
	// For some reason we couldn't read the build id from the executable.
	// Fall back to hashing the entire executable.
	f, err := os.Open(exe)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}
// newLinter constructs a linter backed by the default result cache. The
// cache is salted with this executable's identity so that results are
// invalidated when the binary changes; the runner falls back to the
// current toolchain's Go version when none is configured explicitly.
func newLinter(cfg config.Config) (*linter, error) {
	c, err := cache.Default()
	if err != nil {
		return nil, err
	}
	salt, err := computeSalt()
	if err != nil {
		return nil, fmt.Errorf("could not compute salt for cache: %s", err)
	}
	c.SetSalt(salt)
	r, err := runner.New(cfg, c)
	if err != nil {
		return nil, err
	}
	r.FallbackGoVersion = defaultGoVersion()
	return &linter{
		Runner: r,
	}, nil
}
// LintResult is the outcome of a lint run: the files that were checked,
// the diagnostics produced, and any non-fatal warnings.
type LintResult struct {
	CheckedFiles []string
	Diagnostics []diagnostic
	Warnings []string
}
// Lint runs the linter's analyzers on the packages matched by patterns
// and collects their diagnostics. Failed packages are reported as
// "compile" diagnostics. Usage information for the U1000 (unused) check
// is aggregated across all analyzed packages, so an object used by any
// package is not flagged.
func (l *linter) Lint(cfg *packages.Config, patterns []string) (LintResult, error) {
	var out LintResult

	as := make([]*analysis.Analyzer, 0, len(l.Analyzers))
	for _, a := range l.Analyzers {
		as = append(as, a.Analyzer)
	}
	results, err := l.Runner.Run(cfg, as, patterns)
	if err != nil {
		return out, err
	}

	if len(results) == 0 {
		// TODO(dh): emulate Go's behavior more closely once we have
		// access to go list's Match field.
		for _, pattern := range patterns {
			fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
		}
	}

	analyzerNames := make([]string, 0, len(l.Analyzers))
	for name := range l.Analyzers {
		analyzerNames = append(analyzerNames, name)
	}

	// used records, per object, whether any package used it; unuseds
	// collects the objects flagged as unused, for the cross-package
	// pass at the end.
	used := map[unusedKey]bool{}
	var unuseds []unusedPair
	for _, res := range results {
		if len(res.Errors) > 0 && !res.Failed {
			panic("package has errors but isn't marked as failed")
		}
		if res.Failed {
			out.Diagnostics = append(out.Diagnostics, failed(res)...)
		} else {
			if res.Skipped {
				out.Warnings = append(out.Warnings, fmt.Sprintf("skipped package %s because it is too large", res.Package))
				continue
			}

			// Only initial packages contribute diagnostics and checked files.
			if !res.Initial {
				continue
			}

			out.CheckedFiles = append(out.CheckedFiles, res.Package.GoFiles...)
			allowedAnalyzers := filterAnalyzerNames(analyzerNames, res.Config.Checks)
			resd, err := res.Load()
			if err != nil {
				return out, err
			}
			ps := success(allowedAnalyzers, resd)
			filtered, err := filterIgnored(ps, resd, allowedAnalyzers)
			if err != nil {
				return out, err
			}
			// OPT move this code into the 'success' function.
			for i, diag := range filtered {
				a := l.Analyzers[diag.Category]
				// Some diag.Category don't map to analyzers, such as "staticcheck"
				if a != nil {
					filtered[i].MergeIf = a.Doc.MergeIf
				}
			}
			out.Diagnostics = append(out.Diagnostics, filtered...)

			for _, obj := range resd.Unused.Used {
				// FIXME(dh): pick the object whose filename does not include $GOROOT
				key := unusedKey{
					pkgPath: res.Package.PkgPath,
					base:    filepath.Base(obj.Position.Filename),
					line:    obj.Position.Line,
					name:    obj.Name,
				}
				used[key] = true
			}

			if allowedAnalyzers["U1000"] {
				for _, obj := range resd.Unused.Unused {
					key := unusedKey{
						pkgPath: res.Package.PkgPath,
						base:    filepath.Base(obj.Position.Filename),
						line:    obj.Position.Line,
						name:    obj.Name,
					}
					unuseds = append(unuseds, unusedPair{key, obj})
					if _, ok := used[key]; !ok {
						used[key] = false
					}
				}
			}
		}
	}

	// Emit U1000 diagnostics only for objects that no package used.
	for _, uo := range unuseds {
		if uo.obj.Kind == "type param" {
			// We don't currently flag unused type parameters on used objects, and flagging them on unused objects isn't
			// useful.
			continue
		}
		if used[uo.key] {
			continue
		}
		if uo.obj.InGenerated {
			continue
		}
		out.Diagnostics = append(out.Diagnostics, diagnostic{
			Diagnostic: runner.Diagnostic{
				Position: uo.obj.DisplayPosition,
				Message:  fmt.Sprintf("%s %s is unused", uo.obj.Kind, uo.obj.Name),
				Category: "U1000",
			},
			MergeIf: lint.MergeIfAll,
		})
	}

	return out, nil
}
// filterIgnored applies a package's linter directives to diagnostics:
// any diagnostic matched by an ignore directive is demoted to
// severityIgnored rather than removed. Line ignores that matched
// nothing — and that mention at least one check the user cares about —
// produce an additional diagnostic suggesting their removal. The
// returned error is currently always nil.
func filterIgnored(diagnostics []diagnostic, res runner.ResultData, allowedAnalyzers map[string]bool) ([]diagnostic, error) {
	couldHaveMatched := func(ig *lineIgnore) bool {
		for _, c := range ig.Checks {
			if c == "U1000" {
				// We never want to flag ignores for U1000,
				// because U1000 isn't local to a single
				// package. For example, an identifier may
				// only be used by tests, in which case an
				// ignore would only fire when not analyzing
				// tests. To avoid spurious "useless ignore"
				// warnings, just never flag U1000.
				return false
			}
			// Even though the runner always runs all analyzers, we
			// still only flag unmatched ignores for the set of
			// analyzers the user has expressed interest in. That way,
			// `staticcheck -checks=SA1000` won't complain about an
			// unmatched ignore for an unrelated check.
			if allowedAnalyzers[c] {
				return true
			}
		}
		return false
	}

	ignores, moreDiagnostics := parseDirectives(res.Directives)
	for _, ig := range ignores {
		// Mark every diagnostic covered by this ignore as ignored.
		for i := range diagnostics {
			diag := &diagnostics[i]
			if ig.Match(*diag) {
				diag.Severity = severityIgnored
			}
		}

		if ig, ok := ig.(*lineIgnore); ok && !ig.Matched && couldHaveMatched(ig) {
			diag := diagnostic{
				Diagnostic: runner.Diagnostic{
					Position: ig.Pos,
					Message:  "this linter directive didn't match anything; should it be removed?",
					Category: "staticcheck",
				},
			}
			moreDiagnostics = append(moreDiagnostics, diag)
		}
	}

	return append(diagnostics, moreDiagnostics...), nil
}
// ignore suppresses the diagnostics that it matches.
type ignore interface {
	Match(diag diagnostic) bool
}

// lineIgnore suppresses diagnostics of the listed checks on one line of
// one file. Matched records whether the ignore ever matched a
// diagnostic, so that useless ignores can be reported.
type lineIgnore struct {
	File string
	Line int
	Checks []string
	Matched bool
	Pos token.Position // position of the directive itself
}
// Match reports whether diagnostic p is on this ignore's file and line
// and has a category matching one of the ignore's check globs. A
// successful match is recorded in li.Matched.
func (li *lineIgnore) Match(p diagnostic) bool {
	if p.Position.Filename != li.File || p.Position.Line != li.Line {
		return false
	}
	for _, pattern := range li.Checks {
		if ok, _ := filepath.Match(pattern, p.Category); ok {
			li.Matched = true
			return true
		}
	}
	return false
}
// String renders the ignore as "file:line checks (matched|not matched)"
// for debugging output.
func (li *lineIgnore) String() string {
	state := "not matched"
	if li.Matched {
		state = "matched"
	}
	return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), state)
}
// fileIgnore suppresses diagnostics of the listed checks anywhere in a
// single file.
type fileIgnore struct {
	File   string
	Checks []string
}

// Match reports whether diagnostic p lies in this ignore's file and has
// a category matching one of the configured check globs.
func (fi *fileIgnore) Match(p diagnostic) bool {
	if p.Position.Filename != fi.File {
		return false
	}
	for _, pattern := range fi.Checks {
		if ok, _ := filepath.Match(pattern, p.Category); ok {
			return true
		}
	}
	return false
}
// severity classifies how a diagnostic is reported.
type severity uint8

const (
	severityError severity = iota
	severityWarning
	severityIgnored
)

// String returns the lowercase name of the severity, or "Severity(n)"
// for values outside the known range.
func (s severity) String() string {
	names := [...]string{
		severityError:   "error",
		severityWarning: "warning",
		severityIgnored: "ignored",
	}
	if int(s) < len(names) {
		return names[s]
	}
	return fmt.Sprintf("Severity(%d)", s)
}
// diagnostic represents a diagnostic in some source code.
type diagnostic struct {
	runner.Diagnostic
	Severity severity
	MergeIf lint.MergeStrategy // merge strategy, copied from the analyzer's documentation
	BuildName string // name of the build configuration that produced this diagnostic
}
// equal reports whether p and o agree in every field.
func (p diagnostic) equal(o diagnostic) bool {
	return p.Position == o.Position &&
		p.End == o.End &&
		p.Message == o.Message &&
		p.Category == o.Category &&
		p.Severity == o.Severity &&
		p.MergeIf == o.MergeIf &&
		p.BuildName == o.BuildName
}
// String renders the diagnostic as "message (category)", including the
// build configuration name in brackets when one is set.
func (p *diagnostic) String() string {
	if p.BuildName == "" {
		return fmt.Sprintf("%s (%s)", p.Message, p.Category)
	}
	return fmt.Sprintf("%s [%s] (%s)", p.Message, p.BuildName, p.Category)
}
// failed converts the load errors of a failed package into "compile"
// diagnostics with severityError. For go/packages errors that carry no
// structured position, a position is parsed out of the message text
// when possible.
func failed(res runner.Result) []diagnostic {
	var diagnostics []diagnostic

	for _, e := range res.Errors {
		switch e := e.(type) {
		case packages.Error:
			msg := e.Msg
			if len(msg) != 0 && msg[0] == '\n' {
				// TODO(dh): See https://github.com/golang/go/issues/32363
				msg = msg[1:]
			}

			var posn token.Position
			if e.Pos == "" {
				// Under certain conditions (malformed package
				// declarations, multiple packages in the same
				// directory), go list emits an error on stderr
				// instead of JSON. Those errors do not have
				// associated position information in
				// go/packages.Error, even though the output on
				// stderr may contain it.
				if p, n, err := parsePos(msg); err == nil {
					if abs, err := filepath.Abs(p.Filename); err == nil {
						p.Filename = abs
					}
					posn = p
					// n is the length of the position prefix; skip it
					// plus the following ": ".
					msg = msg[n+2:]
				}
			} else {
				var err error
				posn, _, err = parsePos(e.Pos)
				if err != nil {
					panic(fmt.Sprintf("internal error: %s", e))
				}
			}

			diag := diagnostic{
				Diagnostic: runner.Diagnostic{
					Position: posn,
					Message:  msg,
					Category: "compile",
				},
				Severity: severityError,
			}
			diagnostics = append(diagnostics, diag)
		case error:
			// Errors without position information.
			diag := diagnostic{
				Diagnostic: runner.Diagnostic{
					Position: token.Position{},
					Message:  e.Error(),
					Category: "compile",
				},
				Severity: severityError,
			}
			diagnostics = append(diagnostics, diag)
		}
	}
	return diagnostics
}
// unusedKey identifies a declared object by package path, file base
// name, line, and name, so that usage information for the same object
// can be merged. Only the file's base name is used — see the FIXME in
// Lint about $GOROOT.
type unusedKey struct {
	pkgPath string
	base string
	line int
	name string
}

// unusedPair couples an unused object with its identity key.
type unusedPair struct {
	key unusedKey
	obj unused.SerializedObject
}
// success converts the raw diagnostics of a successfully analyzed
// package, dropping those whose analyzer isn't in the allowed set.
func success(allowedAnalyzers map[string]bool, res runner.ResultData) []diagnostic {
	var out []diagnostic
	for _, d := range res.Diagnostics {
		if allowedAnalyzers[d.Category] {
			out = append(out, diagnostic{Diagnostic: d})
		}
	}
	return out
}
// defaultGoVersion returns the version of the Go toolchain that built
// this binary, derived from its last release tag with the leading "go"
// prefix removed (e.g. "1.18").
func defaultGoVersion() string {
	tags := build.Default.ReleaseTags
	last := tags[len(tags)-1]
	return last[len("go"):]
}
// filterAnalyzerNames evaluates the configured checks against the list
// of known analyzer names. Each entry enables — or, with a leading "-",
// disables — either all checks ("all"/"*"), a glob such as "SA1*" or
// "S*" (a bare category glob like "S*" matches S1000 but not SA1000),
// or a literal check name. Later entries override earlier ones.
func filterAnalyzerNames(analyzers []string, checks []string) map[string]bool {
	allowed := map[string]bool{}
	for _, check := range checks {
		enable := true
		if len(check) > 1 && check[0] == '-' {
			enable = false
			check = check[1:]
		}

		switch {
		case check == "*" || check == "all":
			for _, name := range analyzers {
				allowed[name] = enable
			}
		case strings.HasSuffix(check, "*"):
			prefix := strings.TrimSuffix(check, "*")
			hasDigit := strings.IndexFunc(prefix, unicode.IsNumber) != -1
			for _, name := range analyzers {
				if hasDigit {
					// Glob like "S1*": a plain prefix match suffices.
					if strings.HasPrefix(name, prefix) {
						allowed[name] = enable
					}
				} else {
					// Glob like "S*": match the full category portion
					// (everything before the first digit), so that "S*"
					// matches S1000 but not SA1000.
					digit := strings.IndexFunc(name, unicode.IsNumber)
					if name[:digit] == prefix {
						allowed[name] = enable
					}
				}
			}
		default:
			// Literal check name.
			allowed[check] = enable
		}
	}
	return allowed
}
// posRe extracts "file:line" or "file:line:col" from the beginning of a
// position string.
var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?`)

// parsePos parses a position of the form "file:line[:col]". It returns
// the parsed position, the number of bytes of pos the match consumed,
// and an error if pos is malformed. The inputs "-" and "" parse to the
// zero Position. A missing or malformed column parses as 0.
func parsePos(pos string) (token.Position, int, error) {
	if pos == "" || pos == "-" {
		return token.Position{}, 0, nil
	}
	m := posRe.FindStringSubmatch(pos)
	if m == nil {
		return token.Position{}, 0, fmt.Errorf("internal error: malformed position %q", pos)
	}
	line, _ := strconv.Atoi(m[2])
	col, _ := strconv.Atoi(m[3])
	return token.Position{
		Filename: m[1],
		Line:     line,
		Column:   col,
	}, len(m[0]), nil
}
// options configures a lint run.
type options struct {
	// Config is the lint configuration to use.
	Config config.Config
	// BuildConfig names the build configuration and supplies additional
	// build flags and environment variables.
	BuildConfig BuildConfig
	// LintTests includes test files in the analysis.
	LintTests bool
	// GoVersion is the Go version to target.
	GoVersion string
	// PrintAnalyzerMeasurement, if set, is called every time an
	// analyzer has finished analyzing a package.
	//
	// The parameters are unnamed to match
	// runner.Stats.PrintAnalyzerMeasurement; the previous parameter
	// name "analysis" also shadowed the analysis package.
	PrintAnalyzerMeasurement func(*analysis.Analyzer, *loader.PackageSpec, time.Duration)
}
// doLint runs the given analyzers on the packages matched by paths,
// configured by opt (a nil opt means defaults). While the run is in
// progress, receiving one of the platform's infoSignals prints rough
// progress information to stderr. Every resulting diagnostic is tagged
// with the build configuration's name.
func doLint(as []*lint.Analyzer, paths []string, opt *options) (LintResult, error) {
	if opt == nil {
		opt = &options{}
	}

	l, err := newLinter(opt.Config)
	if err != nil {
		return LintResult{}, err
	}
	analyzers := make(map[string]*lint.Analyzer, len(as))
	for _, a := range as {
		analyzers[a.Analyzer.Name] = a
	}
	l.Analyzers = analyzers
	l.Runner.GoVersion = opt.GoVersion
	l.Runner.Stats.PrintAnalyzerMeasurement = opt.PrintAnalyzerMeasurement

	cfg := &packages.Config{}
	if opt.LintTests {
		cfg.Tests = true
	}
	cfg.BuildFlags = opt.BuildConfig.Flags
	cfg.Env = append(os.Environ(), opt.BuildConfig.Envs...)

	printStats := func() {
		// Individual stats are read atomically, but overall there
		// is no synchronisation. For printing rough progress
		// information, this doesn't matter.
		switch l.Runner.Stats.State() {
		case runner.StateInitializing:
			fmt.Fprintln(os.Stderr, "Status: initializing")
		case runner.StateLoadPackageGraph:
			fmt.Fprintln(os.Stderr, "Status: loading package graph")
		case runner.StateBuildActionGraph:
			fmt.Fprintln(os.Stderr, "Status: building action graph")
		case runner.StateProcessing:
			fmt.Fprintf(os.Stderr, "Packages: %d/%d initial, %d/%d total; Workers: %d/%d\n",
				l.Runner.Stats.ProcessedInitialPackages(),
				l.Runner.Stats.InitialPackages(),
				l.Runner.Stats.ProcessedPackages(),
				l.Runner.Stats.TotalPackages(),
				l.Runner.ActiveWorkers(),
				l.Runner.TotalWorkers(),
			)
		case runner.StateFinalizing:
			fmt.Fprintln(os.Stderr, "Status: finalizing")
		}
	}
	// Print progress on demand when the platform has a suitable signal.
	if len(infoSignals) > 0 {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, infoSignals...)
		defer signal.Stop(ch)
		go func() {
			for range ch {
				printStats()
			}
		}()
	}

	res, err := l.Lint(cfg, paths)
	for i := range res.Diagnostics {
		res.Diagnostics[i].BuildName = opt.BuildConfig.Name
	}
	return res, err
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,49 @@
package runner
import (
"sync/atomic"
"time"
"honnef.co/go/tools/go/loader"
"golang.org/x/tools/go/analysis"
)
// States a runner progresses through, as reported by Stats.State.
const (
	StateInitializing = iota
	StateLoadPackageGraph
	StateBuildActionGraph
	StateProcessing
	StateFinalizing
)
// Stats tracks the progress of a run. The counters are read and written
// atomically, so they may be observed from other goroutines while a run
// is in progress.
type Stats struct {
	state uint32 // one of the State* constants
	initialPackages uint32
	totalPackages uint32
	processedPackages uint32
	processedInitialPackages uint32

	// optional function to call every time an analyzer has finished analyzing a package.
	PrintAnalyzerMeasurement func(*analysis.Analyzer, *loader.PackageSpec, time.Duration)
}
// setState atomically records the runner's current state.
func (s *Stats) setState(state uint32) { atomic.StoreUint32(&s.state, state) }

// State atomically reads the runner's current state, one of the State* constants.
func (s *Stats) State() int { return int(atomic.LoadUint32(&s.state)) }

// setInitialPackages atomically records the number of initial packages.
func (s *Stats) setInitialPackages(n int) { atomic.StoreUint32(&s.initialPackages, uint32(n)) }

// InitialPackages atomically reads the number of initial packages.
func (s *Stats) InitialPackages() int { return int(atomic.LoadUint32(&s.initialPackages)) }

// setTotalPackages atomically records the total number of packages.
func (s *Stats) setTotalPackages(n int) { atomic.StoreUint32(&s.totalPackages, uint32(n)) }

// TotalPackages atomically reads the total number of packages.
func (s *Stats) TotalPackages() int { return int(atomic.LoadUint32(&s.totalPackages)) }

// finishPackage atomically increments the count of processed packages.
func (s *Stats) finishPackage() { atomic.AddUint32(&s.processedPackages, 1) }

// finishInitialPackage atomically increments the count of processed initial packages.
func (s *Stats) finishInitialPackage() { atomic.AddUint32(&s.processedInitialPackages, 1) }

// ProcessedPackages atomically reads the count of processed packages.
func (s *Stats) ProcessedPackages() int { return int(atomic.LoadUint32(&s.processedPackages)) }

// ProcessedInitialPackages atomically reads the count of processed initial packages.
func (s *Stats) ProcessedInitialPackages() int {
	return int(atomic.LoadUint32(&s.processedInitialPackages))
}

// measureAnalyzer forwards one analyzer/package timing to the optional
// PrintAnalyzerMeasurement callback, if any.
func (s *Stats) measureAnalyzer(analysis *analysis.Analyzer, pkg *loader.PackageSpec, d time.Duration) {
	if s.PrintAnalyzerMeasurement != nil {
		s.PrintAnalyzerMeasurement(analysis, pkg, d)
	}
}

View File

@@ -0,0 +1,370 @@
package lintcmd
// Notes on GitHub-specific restrictions:
//
// Result.Message needs to either have ID or Text set. Markdown
// gets ignored. Text isn't treated verbatim however: Markdown
// formatting gets stripped, except for links.
//
// GitHub does not display RelatedLocations. The only way to make
// use of them is to link to them (via their ID) in the
// Result.Message. And even then, it will only show the referred
// line of code, not the message. We can duplicate the messages in
// the Result.Message, but we can't even indent them, because
// leading whitespace gets stripped.
//
// GitHub does use the Markdown version of rule help, but it
// renders it the way it renders comments on issues that is, it
// turns line breaks into hard line breaks, even though it
// shouldn't.
//
// GitHub doesn't make use of the tool's URI or version, nor of
// the help URIs of rules.
//
// There does not seem to be a way of using SARIF for "normal" CI,
// without results showing up as code scanning alerts. Also, a
// SARIF file containing only warnings, no errors, will not fail
// CI by default, but this is configurable.
// GitHub does display some parts of SARIF results in PRs, but
// most of the useful parts of SARIF, such as help text of rules,
// is only accessible via the code scanning alerts, which are only
// accessible by users with write permissions.
//
// Result.Suppressions is being ignored.
//
//
// Notes on other tools
//
// VS Code Sarif viewer
//
// The Sarif viewer in VS Code displays the full message in the
// tabular view, removing newlines. That makes our multi-line
// messages (which we use as a workaround for missing related
// information) very ugly.
//
// Much like GitHub, the Sarif viewer does not make related
// information visible unless we explicitly refer to it in the
// message.
//
// Suggested fixes are not exposed in any way.
//
// It only shows the shortDescription or fullDescription of a
// rule, not its help. We can't put the help in fullDescription,
// because the fullDescription isn't meant to be that long. For
// example, GitHub displays it in a single line, under the
// shortDescription.
//
// VS Code can filter based on Result.Suppressions, but it doesn't
// display our suppression message. Also, by default, suppressed
// results get shown, and the column indicating that a result is
// suppressed is hidden, which makes for a confusing experience.
//
// When a rule has only an ID, no name, VS Code displays a
// prominent dash in place of the name. When the name and ID are
// identical, it prints both. However, we can't make them
// identical, as SARIF requires that either the ID and name are
// different, or that the name is omitted.
// FIXME(dh): we're currently reporting column information using UTF-8
// byte offsets, not using Unicode code points or UTF-16, which are
// the only two ways allowed by SARIF.
// TODO(dh) set properties.tags we can use different tags for the
// staticcheck, simple, stylecheck and unused checks, so users can
// filter their results
import (
"encoding/json"
"fmt"
"net/url"
"os"
"path/filepath"
"regexp"
"strings"
"honnef.co/go/tools/analysis/lint"
"honnef.co/go/tools/sarif"
)
// sarifFormatter emits diagnostics as a SARIF log on standard output,
// identifying the tool by the given driver metadata.
type sarifFormatter struct {
	driverName string
	driverVersion string
	driverWebsite string
}
// sarifLevel maps a lint severity onto one of the SARIF reporting
// levels "error", "warning", or "note".
func sarifLevel(severity lint.Severity) string {
	switch severity {
	case lint.SeverityError:
		return "error"
	case lint.SeverityNone, lint.SeverityDeprecated, lint.SeverityWarning:
		// No configured severity defaults to a warning.
		return "warning"
	case lint.SeverityInfo, lint.SeverityHint:
		return "note"
	default:
		// unreachable
		return "none"
	}
}
// encodePath percent-encodes a relative path for use in a URI.
func encodePath(path string) string {
	u := url.URL{Path: path}
	return u.EscapedPath()
}

// sarifURI turns a path into a file:// URI.
func sarifURI(path string) string {
	return (&url.URL{Scheme: "file", Path: path}).String()
}
// sarifArtifactLocation converts a file path into a SARIF artifact
// location. Paths that are relative (after shortening) are emitted
// relative to the GitHub-specific %SRCROOT% base so GitHub can resolve
// them; absolute paths become file:// URIs.
func sarifArtifactLocation(name string) sarif.ArtifactLocation {
	// Ideally we use relative paths so that GitHub can resolve them
	name = shortPath(name)
	if filepath.IsAbs(name) {
		return sarif.ArtifactLocation{
			URI: sarifURI(name),
		}
	} else {
		return sarif.ArtifactLocation{
			URI:       encodePath(name),
			URIBaseID: "%SRCROOT%", // This is specific to GitHub,
		}
	}
}
// sarifFormatText massages check documentation for rendering on GitHub:
// soft line breaks inside a paragraph are joined with spaces (GitHub
// turns every newline into a hard break), while blank lines, quotations
// ("> ") and indented lines keep their newlines. The result is then run
// through convertCodeBlocks to fence indented code.
func sarifFormatText(s string) string {
	// GitHub doesn't ignore line breaks, even though it should, so we remove them.
	var out strings.Builder
	lines := strings.Split(s, "\n")
	for i, line := range lines[:len(lines)-1] {
		out.WriteString(line)
		if line == "" {
			out.WriteString("\n")
		} else {
			nextLine := lines[i+1]
			if nextLine == "" || strings.HasPrefix(line, "> ") || strings.HasPrefix(line, " ") {
				out.WriteString("\n")
			} else {
				out.WriteString(" ")
			}
		}
	}
	out.WriteString(lines[len(lines)-1])
	return convertCodeBlocks(out.String())
}
// moreCodeFollows reports whether the next non-blank line in lines is
// still indented code. It decides when an open code fence must be
// closed.
func moreCodeFollows(lines []string) bool {
	for _, line := range lines {
		if line == "" {
			continue
		}
		return strings.HasPrefix(line, " ")
	}
	return false
}

// alpha matches lines consisting only of letters and spaces; such lines
// are candidates for being promoted to headings.
var alpha = regexp.MustCompile(`^[a-zA-Z ]+$`)

// convertCodeBlocks rewrites indented code blocks in text into fenced
// ```go blocks, and promotes letters-only lines that follow two or more
// blank lines to "##" headings.
func convertCodeBlocks(text string) string {
	var buf strings.Builder
	lines := strings.Split(text, "\n")

	inCode := false
	empties := 0
	for i, line := range lines {
		// Close an open code fence once no more code follows.
		// (The original had a redundant nested 'if inCode' here.)
		if inCode && !moreCodeFollows(lines[i:]) {
			fmt.Fprintln(&buf, "```")
			inCode = false
		}

		prevEmpties := empties
		if line == "" && !inCode {
			empties++
		} else {
			empties = 0
		}

		if line == "" {
			fmt.Fprintln(&buf)
			continue
		}

		if strings.HasPrefix(line, " ") {
			line = line[4:]
			if !inCode {
				fmt.Fprintln(&buf, "```go")
				inCode = true
			}
		}

		onlyAlpha := alpha.MatchString(line)
		out := line
		if !inCode && prevEmpties >= 2 && onlyAlpha {
			fmt.Fprintf(&buf, "## %s\n", out)
		} else {
			fmt.Fprint(&buf, out)
			fmt.Fprintln(&buf)
		}
	}
	if inCode {
		fmt.Fprintln(&buf, "```")
	}
	return buf.String()
}
// Format writes the diagnostics as a SARIF log to standard output. Each
// analyzer becomes a reporting rule and each diagnostic a result.
// Related information is additionally linked from the result message,
// because GitHub doesn't display related locations on their own, and
// ignored diagnostics are marked as suppressed.
func (o *sarifFormatter) Format(checks []*lint.Analyzer, diagnostics []diagnostic) {
	// TODO(dh): some diagnostics shouldn't be reported as results. For example, when the user specifies a package on the command line that doesn't exist.

	cwd, _ := os.Getwd()
	run := sarif.Run{
		Tool: sarif.Tool{
			Driver: sarif.ToolComponent{
				Name:           o.driverName,
				Version:        o.driverVersion,
				InformationURI: o.driverWebsite,
			},
		},
		Invocations: []sarif.Invocation{{
			Arguments: os.Args[1:],
			WorkingDirectory: sarif.ArtifactLocation{
				URI: sarifURI(cwd),
			},
			ExecutionSuccessful: true,
		}},
	}
	for _, c := range checks {
		run.Tool.Driver.Rules = append(run.Tool.Driver.Rules,
			sarif.ReportingDescriptor{
				// We don't set Name, as Name and ID mustn't be identical.
				ID: c.Analyzer.Name,
				ShortDescription: sarif.Message{
					Text:     c.Doc.Title,
					Markdown: c.Doc.TitleMarkdown,
				},
				HelpURI: "https://staticcheck.io/docs/checks#" + c.Analyzer.Name,
				// We use our markdown as the plain text version, too. We
				// use very little markdown, primarily quotations,
				// indented code blocks and backticks. All of these are
				// fine as plain text, too.
				Help: sarif.Message{
					Text:     sarifFormatText(c.Doc.Format(false)),
					Markdown: sarifFormatText(c.Doc.FormatMarkdown(false)),
				},
				DefaultConfiguration: sarif.ReportingConfiguration{
					// TODO(dh): we could figure out which checks were disabled globally
					Enabled: true,
					Level:   sarifLevel(c.Doc.Severity),
				},
			})
	}
	for _, p := range diagnostics {
		r := sarif.Result{
			RuleID: p.Category,
			Kind:   sarif.Fail,
			Message: sarif.Message{
				Text: p.Message,
			},
		}
		r.Locations = []sarif.Location{{
			PhysicalLocation: sarif.PhysicalLocation{
				ArtifactLocation: sarifArtifactLocation(p.Position.Filename),
				Region: sarif.Region{
					StartLine:   p.Position.Line,
					StartColumn: p.Position.Column,
					EndLine:     p.End.Line,
					EndColumn:   p.End.Column,
				},
			},
		}}
		for _, fix := range p.SuggestedFixes {
			sfix := sarif.Fix{
				Description: sarif.Message{
					Text: fix.Message,
				},
			}
			// file name -> replacements
			changes := map[string][]sarif.Replacement{}
			for _, edit := range fix.TextEdits {
				changes[edit.Position.Filename] = append(changes[edit.Position.Filename], sarif.Replacement{
					DeletedRegion: sarif.Region{
						StartLine:   edit.Position.Line,
						StartColumn: edit.Position.Column,
						EndLine:     edit.End.Line,
						EndColumn:   edit.End.Column,
					},
					InsertedContent: sarif.ArtifactContent{
						Text: string(edit.NewText),
					},
				})
			}
			for path, replacements := range changes {
				sfix.ArtifactChanges = append(sfix.ArtifactChanges, sarif.ArtifactChange{
					ArtifactLocation: sarifArtifactLocation(path),
					Replacements:     replacements,
				})
			}
			r.Fixes = append(r.Fixes, sfix)
		}
		for i, related := range p.Related {
			// Link to the related location from the message, as GitHub
			// doesn't display RelatedLocations themselves.
			r.Message.Text += fmt.Sprintf("\n\t[%s](%d)", related.Message, i+1)

			r.RelatedLocations = append(r.RelatedLocations,
				sarif.Location{
					ID: i + 1,
					Message: &sarif.Message{
						Text: related.Message,
					},
					PhysicalLocation: sarif.PhysicalLocation{
						ArtifactLocation: sarifArtifactLocation(related.Position.Filename),
						Region: sarif.Region{
							StartLine:   related.Position.Line,
							StartColumn: related.Position.Column,
							EndLine:     related.End.Line,
							EndColumn:   related.End.Column,
						},
					},
				})
		}
		if p.Severity == severityIgnored {
			// Note that GitHub does not support suppressions, which is why Staticcheck still requires the -show-ignored flag to be set for us to emit ignored diagnostics.
			r.Suppressions = []sarif.Suppression{{
				Kind: "inSource",
				// TODO(dh): populate the Justification field
			}}
		} else {
			// We want an empty slice, not nil. SARIF differentiates
			// between the two. An empty slice means that the diagnostic
			// wasn't suppressed, while nil means that we don't have the
			// information available.
			r.Suppressions = []sarif.Suppression{}
		}
		run.Results = append(run.Results, r)
	}

	// Writing to stdout is best-effort, as in the other formatters;
	// discard the error explicitly to make that intent visible.
	_ = json.NewEncoder(os.Stdout).Encode(sarif.Log{
		Version: sarif.Version,
		Schema:  sarif.Schema,
		Runs:    []sarif.Run{run},
	})
}

View File

@@ -0,0 +1,8 @@
//go:build !aix && !android && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
// +build !aix,!android,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package lintcmd
import "os"
// infoSignals is empty on platforms that provide no signal suitable for
// requesting progress information.
var infoSignals = []os.Signal{}

View File

@@ -0,0 +1,11 @@
//go:build darwin || dragonfly || freebsd || netbsd || openbsd
// +build darwin dragonfly freebsd netbsd openbsd
package lintcmd
import (
"os"
"syscall"
)
// infoSignals lists the signals used to request progress information;
// Darwin and the BSDs provide SIGINFO for this purpose.
var infoSignals = []os.Signal{syscall.SIGINFO}

View File

@@ -0,0 +1,11 @@
//go:build aix || android || linux || solaris
// +build aix android linux solaris
package lintcmd
import (
"os"
"syscall"
)
// infoSignals lists the signals used to request progress information;
// platforms without SIGINFO use SIGUSR1 instead.
var infoSignals = []os.Signal{syscall.SIGUSR1}

View File

@@ -0,0 +1,44 @@
package version
import (
"fmt"
"runtime/debug"
)
// printBuildInfo prints the main module and all dependency modules
// recorded in the binary's build info, or a note when the binary was
// built without module support.
func printBuildInfo() {
	if info, ok := debug.ReadBuildInfo(); ok {
		fmt.Println("Main module:")
		printModule(&info.Main)
		fmt.Println("Dependencies:")
		for _, dep := range info.Deps {
			printModule(dep)
		}
	} else {
		fmt.Println("Built without Go modules")
	}
}
// buildInfoVersion returns the main module's version as recorded in the
// binary's build info. It reports false when no build info is embedded
// or when the version is the "(devel)" placeholder.
func buildInfoVersion() (string, bool) {
	info, ok := debug.ReadBuildInfo()
	if !ok || info.Main.Version == "(devel)" {
		return "", false
	}
	return info.Main.Version, true
}
// printModule prints one module's path followed by its version (unless
// it is the "(devel)" placeholder), checksum, and replacement path,
// indented by one tab.
func printModule(m *debug.Module) {
	fmt.Printf("\t%s", m.Path)
	if m.Version != "(devel)" {
		fmt.Printf("@%s", m.Version)
	}
	if m.Sum != "" {
		fmt.Printf(" (sum: %s)", m.Sum)
	}
	if m.Replace != nil {
		fmt.Printf(" (replace: %s)", m.Replace.Path)
	}
	fmt.Println()
}

View File

@@ -0,0 +1,43 @@
package version
import (
"fmt"
"os"
"path/filepath"
"runtime"
)
// Version is the human-readable version of this release.
const Version = "2022.1.1"

// MachineVersion is the machine-readable version of this release.
const MachineVersion = "v0.3.1"
// version returns a version descriptor and reports whether the
// version is a known release.
func version(human, machine string) (human_, machine_ string, known bool) {
if human != "devel" {
return human, machine, true
}
v, ok := buildInfoVersion()
if ok {
return v, "", false
}
return "devel", "", false
}
// Print writes the program's name and version to stdout. Release builds
// show both the human and machine versions; development builds show
// whatever version information is available.
func Print(human, machine string) {
	name := filepath.Base(os.Args[0])
	human, machine, release := version(human, machine)
	switch {
	case release:
		fmt.Printf("%s %s (%s)\n", name, human, machine)
	case human == "devel":
		fmt.Printf("%s (no version)\n", name)
	default:
		fmt.Printf("%s (devel, %s)\n", name, human)
	}
}
// Verbose prints the version like Print, followed by the Go toolchain
// version and the module build information.
func Verbose(human, machine string) {
	Print(human, machine)
	fmt.Println()
	fmt.Println("Compiled with Go version:", runtime.Version())
	printBuildInfo()
}

View File

@@ -0,0 +1,245 @@
package pattern
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"reflect"
"golang.org/x/exp/typeparams"
)
// astTypes maps the names of go/ast node types to their reflect.Type,
// used by NodeToAST to construct AST values from pattern nodes.
var astTypes = map[string]reflect.Type{
	"Ellipsis": reflect.TypeOf(ast.Ellipsis{}),
	"RangeStmt": reflect.TypeOf(ast.RangeStmt{}),
	"AssignStmt": reflect.TypeOf(ast.AssignStmt{}),
	"IndexExpr": reflect.TypeOf(ast.IndexExpr{}),
	"IndexListExpr": reflect.TypeOf(typeparams.IndexListExpr{}),
	"Ident": reflect.TypeOf(ast.Ident{}),
	"ValueSpec": reflect.TypeOf(ast.ValueSpec{}),
	"GenDecl": reflect.TypeOf(ast.GenDecl{}),
	"BinaryExpr": reflect.TypeOf(ast.BinaryExpr{}),
	"ForStmt": reflect.TypeOf(ast.ForStmt{}),
	"ArrayType": reflect.TypeOf(ast.ArrayType{}),
	"DeferStmt": reflect.TypeOf(ast.DeferStmt{}),
	"MapType": reflect.TypeOf(ast.MapType{}),
	"ReturnStmt": reflect.TypeOf(ast.ReturnStmt{}),
	"SliceExpr": reflect.TypeOf(ast.SliceExpr{}),
	"StarExpr": reflect.TypeOf(ast.StarExpr{}),
	"UnaryExpr": reflect.TypeOf(ast.UnaryExpr{}),
	"SendStmt": reflect.TypeOf(ast.SendStmt{}),
	"SelectStmt": reflect.TypeOf(ast.SelectStmt{}),
	"ImportSpec": reflect.TypeOf(ast.ImportSpec{}),
	"IfStmt": reflect.TypeOf(ast.IfStmt{}),
	"GoStmt": reflect.TypeOf(ast.GoStmt{}),
	"Field": reflect.TypeOf(ast.Field{}),
	"SelectorExpr": reflect.TypeOf(ast.SelectorExpr{}),
	"StructType": reflect.TypeOf(ast.StructType{}),
	"KeyValueExpr": reflect.TypeOf(ast.KeyValueExpr{}),
	"FuncType": reflect.TypeOf(ast.FuncType{}),
	"FuncLit": reflect.TypeOf(ast.FuncLit{}),
	"FuncDecl": reflect.TypeOf(ast.FuncDecl{}),
	"ChanType": reflect.TypeOf(ast.ChanType{}),
	"CallExpr": reflect.TypeOf(ast.CallExpr{}),
	"CaseClause": reflect.TypeOf(ast.CaseClause{}),
	"CommClause": reflect.TypeOf(ast.CommClause{}),
	"CompositeLit": reflect.TypeOf(ast.CompositeLit{}),
	"EmptyStmt": reflect.TypeOf(ast.EmptyStmt{}),
	"SwitchStmt": reflect.TypeOf(ast.SwitchStmt{}),
	"TypeSwitchStmt": reflect.TypeOf(ast.TypeSwitchStmt{}),
	"TypeAssertExpr": reflect.TypeOf(ast.TypeAssertExpr{}),
	"TypeSpec": reflect.TypeOf(ast.TypeSpec{}),
	"InterfaceType": reflect.TypeOf(ast.InterfaceType{}),
	"BranchStmt": reflect.TypeOf(ast.BranchStmt{}),
	"IncDecStmt": reflect.TypeOf(ast.IncDecStmt{}),
	"BasicLit": reflect.TypeOf(ast.BasicLit{}),
}
// ASTToNode converts a go/ast value (an AST node, a slice of nodes, a
// string, or a token) into the pattern package's Node representation.
// Nil values become Nil{}, slices become linked Lists, and wrapper
// nodes such as ExprStmt and ParenExpr are unwrapped. Converting an
// *ast.File is not supported and panics.
func ASTToNode(node interface{}) Node {
	switch node := node.(type) {
	case *ast.File:
		panic("cannot convert *ast.File to Node")
	case nil:
		return Nil{}
	case string:
		return String(node)
	case token.Token:
		return Token(node)
	case *ast.ExprStmt:
		return ASTToNode(node.X)
	case *ast.BlockStmt:
		if node == nil {
			return Nil{}
		}
		return ASTToNode(node.List)
	case *ast.FieldList:
		if node == nil {
			return Nil{}
		}
		return ASTToNode(node.List)
	case *ast.BasicLit:
		// A nil BasicLit becomes Nil{}; a non-nil one falls through to
		// the generic reflection-based conversion below.
		if node == nil {
			return Nil{}
		}
	case *ast.ParenExpr:
		return ASTToNode(node.X)
	}

	if node, ok := node.(ast.Node); ok {
		name := reflect.TypeOf(node).Elem().Name()
		T, ok := structNodes[name]
		if !ok {
			panic(fmt.Sprintf("internal error: unhandled type %T", node))
		}

		if reflect.ValueOf(node).IsNil() {
			return Nil{}
		}
		v := reflect.ValueOf(node).Elem()
		objs := make([]Node, T.NumField())
		for i := 0; i < T.NumField(); i++ {
			// Pattern node structs mirror the field names of the
			// corresponding go/ast structs.
			f := v.FieldByName(T.Field(i).Name)
			objs[i] = ASTToNode(f.Interface())
		}

		n, err := populateNode(name, objs, false)
		if err != nil {
			panic(fmt.Sprintf("internal error: %s", err))
		}
		return n
	}

	s := reflect.ValueOf(node)
	if s.Kind() == reflect.Slice {
		if s.Len() == 0 {
			return List{}
		}
		if s.Len() == 1 {
			return ASTToNode(s.Index(0).Interface())
		}

		// Build the List back to front so each element links to the
		// tail constructed so far.
		tail := List{}
		for i := s.Len() - 1; i >= 0; i-- {
			head := ASTToNode(s.Index(i).Interface())
			l := List{
				Head: head,
				Tail: tail,
			}
			tail = l
		}
		return tail
	}

	panic(fmt.Sprintf("internal error: unhandled type %T", node))
}
// NodeToAST converts a pattern Node back into a go/ast value, resolving
// Bindings via state: Tokens become token.Token, Strings become string,
// Nil becomes nil, Lists become []ast.Node, and struct-like pattern
// nodes are rebuilt into their go/ast counterparts via reflection.
// Nodes that cannot appear in output (Builtin, Any, Object, Function,
// Not, Or, or an unbound Binding) panic.
func NodeToAST(node Node, state State) interface{} {
	switch node := node.(type) {
	case Binding:
		v, ok := state[node.Name]
		if !ok {
			// really we want to return an error here
			panic("XXX")
		}
		switch v := v.(type) {
		case types.Object:
			return &ast.Ident{Name: v.Name()}
		default:
			return v
		}
	case Builtin, Any, Object, Function, Not, Or:
		panic("XXX")
	case List:
		if (node == List{}) {
			return []ast.Node{}
		}
		// Flatten the linked list into a slice, head first.
		x := []ast.Node{NodeToAST(node.Head, state).(ast.Node)}
		x = append(x, NodeToAST(node.Tail, state).([]ast.Node)...)
		return x
	case Token:
		return token.Token(node)
	case String:
		return string(node)
	case Nil:
		return nil
	}

	name := reflect.TypeOf(node).Name()
	T, ok := astTypes[name]
	if !ok {
		panic(fmt.Sprintf("internal error: unhandled type %T", node))
	}
	v := reflect.ValueOf(node)
	out := reflect.New(T)
	for i := 0; i < T.NumField(); i++ {
		fNode := v.FieldByName(T.Field(i).Name)
		if (fNode == reflect.Value{}) {
			continue
		}
		fAST := out.Elem().FieldByName(T.Field(i).Name)
		switch fAST.Type().Kind() {
		case reflect.Slice:
			c := reflect.ValueOf(NodeToAST(fNode.Interface().(Node), state))
			if c.Kind() != reflect.Slice {
				// it's a single node in the pattern, we have to wrap
				// it in a slice
				slice := reflect.MakeSlice(fAST.Type(), 1, 1)
				slice.Index(0).Set(c)
				c = slice
			}
			// Convert between []ast.Node and []ast.Expr as needed to
			// match the destination field's element type.
			switch fAST.Interface().(type) {
			case []ast.Node:
				switch cc := c.Interface().(type) {
				case []ast.Node:
					fAST.Set(c)
				case []ast.Expr:
					var slice []ast.Node
					for _, el := range cc {
						slice = append(slice, el)
					}
					fAST.Set(reflect.ValueOf(slice))
				default:
					panic("XXX")
				}
			case []ast.Expr:
				switch cc := c.Interface().(type) {
				case []ast.Node:
					var slice []ast.Expr
					for _, el := range cc {
						slice = append(slice, el.(ast.Expr))
					}
					fAST.Set(reflect.ValueOf(slice))
				case []ast.Expr:
					fAST.Set(c)
				default:
					panic("XXX")
				}
			default:
				panic("XXX")
			}
		case reflect.Int:
			c := reflect.ValueOf(NodeToAST(fNode.Interface().(Node), state))
			switch c.Kind() {
			case reflect.String:
				// Tokens stored by their string representation are
				// mapped back to token constants.
				tok, ok := tokensByString[c.Interface().(string)]
				if !ok {
					// really we want to return an error here
					panic("XXX")
				}
				fAST.SetInt(int64(tok))
			case reflect.Int:
				fAST.Set(c)
			default:
				panic(fmt.Sprintf("internal error: unexpected kind %s", c.Kind()))
			}
		default:
			r := NodeToAST(fNode.Interface().(Node), state)
			if r != nil {
				fAST.Set(reflect.ValueOf(r))
			}
		}
	}
	return out.Interface().(ast.Node)
}

273
vendor/honnef.co/go/tools/pattern/doc.go vendored Normal file
View File

@@ -0,0 +1,273 @@
/*
Package pattern implements a simple language for pattern matching Go ASTs.
Design decisions and trade-offs
The language is designed specifically for the task of filtering ASTs
to simplify the implementation of analyses in staticcheck.
It is also intended to be trivial to parse and execute.
To that end, we make certain decisions that make the language more
suited to its task, while making certain queries infeasible.
Furthermore, it is fully expected that the majority of analyses will still require ordinary Go code
to further process the filtered AST, to make use of type information and to enforce complex invariants.
It is not our goal to design a scripting language for writing entire checks in.
The language
At its core, patterns are a representation of Go ASTs, allowing for the use of placeholders to enable pattern matching.
Their syntax is inspired by LISP and Haskell, but unlike LISP, the core unit of patterns isn't the list, but the node.
There is a fixed set of nodes, identified by name, and with the exception of the Or node, all nodes have a fixed number of arguments.
In addition to nodes, there are atoms, which represent basic units such as strings or the nil value.
Pattern matching is implemented via bindings, represented by the Binding node.
A Binding can match nodes and associate them with names, to later recall the nodes.
This allows for expressing "this node must be equal to that node" constraints.
To simplify writing and reading patterns, a small amount of additional syntax exists on top of nodes and atoms.
This additional syntax doesn't add any new features of its own, it simply provides shortcuts to creating nodes and atoms.
To show an example of a pattern, first consider this snippet of Go code:
if x := fn(); x != nil {
for _, v := range x {
println(v, x)
}
}
The corresponding AST expressed as an idiomatic pattern would look as follows:
(IfStmt
(AssignStmt (Ident "x") ":=" (CallExpr (Ident "fn") []))
(BinaryExpr (Ident "x") "!=" (Ident "nil"))
(RangeStmt
(Ident "_") (Ident "v") ":=" (Ident "x")
(CallExpr (Ident "println") [(Ident "v") (Ident "x")]))
nil)
Two things are worth noting about this representation.
First, the [el1 el2 ...] syntax is a short-hand for creating lists.
It is a short-hand for el1:el2:[], which itself is a short-hand for (List el1 (List el2 (List nil nil)).
Second, note the absence of a lot of lists in places that normally accept lists.
For example, assignment assigns a number of right-hands to a number of left-hands, yet our AssignStmt is lacking any form of list.
This is due to the fact that a single node can match a list of exactly one element.
Thus, the two following forms have identical matching behavior:
(AssignStmt (Ident "x") ":=" (CallExpr (Ident "fn") []))
(AssignStmt [(Ident "x")] ":=" [(CallExpr (Ident "fn") [])])
This section serves as an overview of the language's syntax.
More in-depth explanations of the matching behavior as well as an exhaustive list of node types follows in the coming sections.
Pattern matching
TODO write about pattern matching
- inspired by haskell syntax, but much, much simpler and naive
Node types
The language contains two kinds of nodes: those that map to nodes in the AST, and those that implement additional logic.
Nodes that map directly to AST nodes are named identically to the types in the go/ast package.
What follows is an exhaustive list of these nodes:
(ArrayType len elt)
(AssignStmt lhs tok rhs)
(BasicLit kind value)
(BinaryExpr x op y)
(BranchStmt tok label)
(CallExpr fun args)
(CaseClause list body)
(ChanType dir value)
(CommClause comm body)
(CompositeLit type elts)
(DeferStmt call)
(Ellipsis elt)
(EmptyStmt)
(Field names type tag)
(ForStmt init cond post body)
(FuncDecl recv name type body)
(FuncLit type body)
(FuncType params results)
(GenDecl specs)
(GoStmt call)
(Ident name)
(IfStmt init cond body else)
(ImportSpec name path)
(IncDecStmt x tok)
(IndexExpr x index)
(InterfaceType methods)
(KeyValueExpr key value)
(MapType key value)
(RangeStmt key value tok x body)
(ReturnStmt results)
(SelectStmt body)
(SelectorExpr x sel)
(SendStmt chan value)
(SliceExpr x low high max)
(StarExpr x)
(StructType fields)
(SwitchStmt init tag body)
(TypeAssertExpr)
(TypeSpec name type)
(TypeSwitchStmt init assign body)
(UnaryExpr op x)
(ValueSpec names type values)
Additionally, there are the String, Token and nil atoms.
Strings are double-quoted string literals, as in (Ident "someName").
Tokens are also represented as double-quoted string literals, but are converted to token.Token values in contexts that require tokens,
such as in (BinaryExpr x "<" y), where "<" is transparently converted to token.LSS during matching.
The keyword 'nil' denotes the nil value, which represents the absence of any value.
We also define the (List head tail) node, which is used to represent sequences of elements as a singly linked list.
The head is a single element, and the tail is the remainder of the list.
For example,
(List "foo" (List "bar" (List "baz" (List nil nil))))
represents a list of three elements, "foo", "bar" and "baz". There is dedicated syntax for writing lists, which looks as follows:
["foo" "bar" "baz"]
This syntax is itself syntactic sugar for the following form:
"foo":"bar":"baz":[]
This form is of particular interest for pattern matching, as it allows matching on the head and tail. For example,
"foo":"bar":_
would match any list with at least two elements, where the first two elements are "foo" and "bar". This is equivalent to writing
(List "foo" (List "bar" _))
Note that it is not possible to match from the end of the list.
That is, there is no way to express a query such as "a list of any length where the last element is foo".
Note that unlike in LISP, nil and empty lists are distinct from one another.
In patterns, with respect to lists, nil is akin to Go's untyped nil.
It will match a nil ast.Node, but it will not match a nil []ast.Expr. Nil will, however, match pointers to named types such as *ast.Ident.
Similarly, lists are akin to Go's
slices. An empty list will match both a nil and an empty []ast.Expr, but it will not match a nil ast.Node.
Due to the difference between nil and empty lists, an empty list is represented as (List nil nil), i.e. a list with no head or tail.
Similarly, a list of one element is represented as (List el (List nil nil)). Unlike in LISP, it cannot be represented by (List el nil).
Finally, there are nodes that implement special logic or matching behavior.
(Any) matches any value. The underscore (_) maps to this node, making the following two forms equivalent:
(Ident _)
(Ident (Any))
(Builtin name) matches a built-in identifier or function by name.
This is a type-aware variant of (Ident name).
Instead of only comparing the name, it resolves the object behind the name and makes sure it's a pre-declared identifier.
For example, in the following piece of code
func fn() {
println(true)
true := false
println(true)
}
the pattern
(Builtin "true")
will match exactly once, on the first use of 'true' in the function.
Subsequent occurrences of 'true' no longer refer to the pre-declared identifier.
(Object name) matches an identifier by name, but yields the
types.Object it refers to.
(Function name) matches ast.Idents and ast.SelectorExprs that refer to a function with a given fully qualified name.
For example, "net/url.PathEscape" matches the PathEscape function in the net/url package,
and "(net/url.EscapeError).Error" refers to the Error method on the net/url.EscapeError type,
either on an instance of the type, or on the type itself.
For example, the following patterns match the following lines of code:
(CallExpr (Function "fmt.Println") _) // pattern 1
(CallExpr (Function "(net/url.EscapeError).Error") _) // pattern 2
fmt.Println("hello, world") // matches pattern 1
var x url.EscapeError
x.Error() // matches pattern 2
(url.EscapeError).Error(x) // also matches pattern 2
(Binding name node) creates or uses a binding.
Bindings work like variable assignments, allowing referring to already matched nodes.
As an example, bindings are necessary to match self-assignment of the form "x = x",
since we need to express that the right-hand side is identical to the left-hand side.
If a binding's node is not nil, the matcher will attempt to match a node according to the pattern.
If a binding's node is nil, the binding will either recall an existing value, or match the Any node.
It is an error to provide a non-nil node to a binding that has already been bound.
Referring back to the earlier example, the following pattern will match self-assignment of idents:
(AssignStmt (Binding "lhs" (Ident _)) "=" (Binding "lhs" nil))
Because bindings are a crucial component of pattern matching, there is special syntax for creating and recalling bindings.
Lower-case names refer to bindings. If standing on its own, the name "foo" will be equivalent to (Binding "foo" nil).
If a name is followed by an at-sign (@) then it will create a binding for the node that follows.
Together, this allows us to rewrite the earlier example as follows:
(AssignStmt lhs@(Ident _) "=" lhs)
(Or nodes...) is a variadic node that tries matching each node until one succeeds. For example, the following pattern matches all idents of name "foo" or "bar":
(Ident (Or "foo" "bar"))
We could also have written
(Or (Ident "foo") (Ident "bar"))
and achieved the same result. We can also mix different kinds of nodes:
(Or (Ident "foo") (CallExpr (Ident "bar") _))
When using bindings inside of nodes used inside Or, all or none of the bindings will be bound.
That is, partially matched nodes that ultimately failed to match will not produce any bindings observable outside of the matching attempt.
We can thus write
(Or (Ident name) (CallExpr name))
and 'name' will either be a String if the first option matched, or an Ident or SelectorExpr if the second option matched.
(Not node)
The Not node negates a match. For example, (Not (Ident _)) will match all nodes that aren't identifiers.
ChanDir(0)
Automatic unnesting of AST nodes
The Go AST has several types of nodes that wrap other nodes.
To simplify matching, we automatically unwrap some of these nodes.
These nodes are ExprStmt (for using expressions in a statement context),
ParenExpr (for parenthesized expressions),
DeclStmt (for declarations in a statement context),
and LabeledStmt (for labeled statements).
Thus, the query
(FuncLit _ [(CallExpr _ _)])
will match a function literal containing a single function call,
even though in the actual Go AST, the CallExpr is nested inside an ExprStmt,
as function bodies are made up of sequences of statements.
On the flip-side, there is no way to specifically match these wrapper nodes.
For example, there is no way of searching for unnecessary parentheses, like in the following piece of Go code:
((x)) += 2
*/
package pattern

View File

@@ -0,0 +1,51 @@
//go:build gofuzz
// +build gofuzz
package pattern
import (
"go/ast"
goparser "go/parser"
"go/token"
"os"
"path/filepath"
"strings"
)
// files holds the ASTs of all parseable Go files found under the local
// Go standard library tree; it is populated by init and serves as the
// corpus that fuzzed patterns are matched against.
var files []*ast.File
// init walks the Go standard library sources (assumed to live at
// /usr/lib/go/src), parses every .go file it can, and collects the
// resulting ASTs in files. Files that fail to parse are skipped;
// errors from the walk itself are fatal.
func init() {
	fset := token.NewFileSet()
	filepath.Walk("/usr/lib/go/src", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			// XXX error handling
			panic(err)
		}
		if !strings.HasSuffix(path, ".go") {
			return nil
		}
		f, err := goparser.ParseFile(fset, path, nil, 0)
		if err != nil {
			// unparseable files simply don't contribute to the corpus
			return nil
		}
		files = append(files, f)
		return nil
	})
}
// Fuzz is the go-fuzz entry point. Inputs that fail to parse are
// uninteresting (0), unless the parser reports an internal error,
// which indicates a bug and panics. Valid patterns are stringified
// and matched against the standard-library corpus (1).
func Fuzz(data []byte) int {
	parser := &Parser{}
	pattern, err := parser.Parse(string(data))
	if err == nil {
		_ = pattern.Root.String()
		for _, file := range files {
			Match(pattern.Root, file)
		}
		return 1
	}
	if strings.Contains(err.Error(), "internal error") {
		panic(err)
	}
	return 0
}

View File

@@ -0,0 +1,221 @@
package pattern
import (
"fmt"
"go/token"
"unicode"
"unicode/utf8"
)
// lexer tokenizes a pattern string into items, which it delivers on
// the items channel. Offsets are byte offsets into input; newlines are
// registered with f so offsets can be turned into positions.
type lexer struct {
	f *token.File

	input string
	start int // start offset of the item currently being lexed
	pos   int // current read offset
	width int // width of the rune most recently returned by next
	items chan item
}

// itemType identifies the kind of a lexed item.
type itemType int

// eof is the sentinel returned by next when the input is exhausted.
const eof = -1

// The kinds of items produced by the lexer.
const (
	itemError itemType = iota
	itemLeftParen
	itemRightParen
	itemLeftBracket
	itemRightBracket
	itemTypeName
	itemVariable
	itemAt
	itemColon
	itemBlank
	itemString
	itemEOF
)
// String returns a human-readable representation of the item type,
// used in parser error messages.
func (typ itemType) String() string {
	names := [...]string{
		itemError:        "ERROR",
		itemLeftParen:    "(",
		itemRightParen:   ")",
		itemLeftBracket:  "[",
		itemRightBracket: "]",
		itemTypeName:     "TYPE",
		itemVariable:     "VAR",
		itemAt:           "@",
		itemColon:        ":",
		itemBlank:        "_",
		itemString:       "STRING",
		itemEOF:          "EOF",
	}
	if i := int(typ); i >= 0 && i < len(names) {
		return names[i]
	}
	return fmt.Sprintf("itemType(%d)", typ)
}
// item is a single lexed token: its kind, its textual value, and the
// byte offset in the input at which it starts.
type item struct {
	typ itemType
	val string
	pos int
}

// stateFn is one state of the lexer's state machine; it does some
// lexing and returns the next state, or nil to terminate.
type stateFn func(*lexer) stateFn
// run drives the state machine until a state returns nil, then closes
// the items channel to signal the consumer that lexing is finished.
func (l *lexer) run() {
	state := stateFn(lexStart)
	for state != nil {
		state = state(l)
	}
	close(l.items)
}
// emitValue sends an item of type t carrying an explicit value
// (rather than the raw consumed input), then marks the consumed input
// as handled.
func (l *lexer) emitValue(t itemType, value string) {
	l.items <- item{t, value, l.start}
	l.start = l.pos
}

// emit sends an item of type t whose value is the input consumed since
// the last emit/ignore, then marks that input as handled.
func (l *lexer) emit(t itemType) {
	l.items <- item{t, l.input[l.start:l.pos], l.start}
	l.start = l.pos
}
// lexStart is the initial (and inter-token) state. It dispatches on
// the next rune: punctuation becomes a single-rune item, a double
// quote starts a string, an upper-case letter a node type name, and a
// lower-case letter a variable name. Any other rune is an error.
func lexStart(l *lexer) stateFn {
	switch r := l.next(); {
	case r == eof:
		l.emit(itemEOF)
		return nil
	case unicode.IsSpace(r):
		l.ignore()
	case r == '(':
		l.emit(itemLeftParen)
	case r == ')':
		l.emit(itemRightParen)
	case r == '[':
		l.emit(itemLeftBracket)
	case r == ']':
		l.emit(itemRightBracket)
	case r == '@':
		l.emit(itemAt)
	case r == ':':
		l.emit(itemColon)
	case r == '_':
		l.emit(itemBlank)
	case r == '"':
		// back up so the sub-lexer sees the opening quote
		l.backup()
		return lexString
	case unicode.IsUpper(r):
		l.backup()
		return lexType
	case unicode.IsLower(r):
		l.backup()
		return lexVariable
	default:
		return l.errorf("unexpected character %c", r)
	}
	return lexStart
}
// next returns the next rune in the input, or eof when exhausted. It
// registers newline offsets with the token.File so item offsets can be
// converted to line/column positions, and remembers the rune's width
// so that a single backup is possible.
func (l *lexer) next() (r rune) {
	if l.pos >= len(l.input) {
		l.width = 0
		return eof
	}
	r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
	if r == '\n' {
		l.f.AddLine(l.pos)
	}
	l.pos += l.width
	return r
}
// ignore discards the input consumed since the last emit.
func (l *lexer) ignore() {
	l.start = l.pos
}

// backup steps back over the rune most recently returned by next.
// It may be called at most once per call to next.
func (l *lexer) backup() {
	l.pos -= l.width
}
// errorf emits an error item carrying a formatted message and
// terminates the state machine by returning nil.
// TODO(dh): emit position information in errors
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
	msg := fmt.Sprintf(format, args...)
	l.items <- item{typ: itemError, val: msg, pos: l.start}
	return nil
}
// isAlphaNumeric reports whether r is an ASCII letter or digit. This
// is deliberately ASCII-only; it defines the characters allowed in
// type and variable names after their first rune.
func isAlphaNumeric(r rune) bool {
	switch {
	case r >= '0' && r <= '9':
		return true
	case r >= 'a' && r <= 'z':
		return true
	case r >= 'A' && r <= 'Z':
		return true
	default:
		return false
	}
}
// lexString lexes a double-quoted string literal. Within the string,
// backslash escapes the next character: \" yields a literal quote and
// \\ a literal backslash; any other escaped character is taken
// literally.
//
// Fix: the escape flag was previously not reset after an escaped
// character other than '"' or '\\', so an input such as "\n" left
// escape set and caused the *closing* quote to be treated as escaped,
// corrupting the string and consuming input past its end.
func lexString(l *lexer) stateFn {
	l.next() // skip quote
	escape := false

	var runes []rune
	for {
		switch r := l.next(); r {
		case eof:
			return l.errorf("unterminated string")
		case '"':
			if !escape {
				l.emitValue(itemString, string(runes))
				return lexStart
			}
			runes = append(runes, '"')
			escape = false
		case '\\':
			if escape {
				runes = append(runes, '\\')
				escape = false
			} else {
				escape = true
			}
		default:
			runes = append(runes, r)
			// an escape applies only to this one character
			escape = false
		}
	}
}
// lexType lexes a node type name: an upper-case letter (already
// validated by lexStart) followed by ASCII letters and digits.
func lexType(l *lexer) stateFn {
	l.next()
	for isAlphaNumeric(l.next()) {
	}
	l.backup()
	l.emit(itemTypeName)
	return lexStart
}
// lexVariable lexes a variable name: a lower-case letter (already
// validated by lexStart) followed by ASCII letters and digits.
func lexVariable(l *lexer) stateFn {
	l.next()
	for isAlphaNumeric(l.next()) {
	}
	l.backup()
	l.emit(itemVariable)
	return lexStart
}

View File

@@ -0,0 +1,620 @@
package pattern
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"reflect"
"golang.org/x/exp/typeparams"
)
// tokensByString maps a token's spelling, as written in patterns, to
// its go/token constant. Pattern strings appearing in token positions
// are converted through this table (see maybeToken).
var tokensByString = map[string]Token{
	"INT":         Token(token.INT),
	"FLOAT":       Token(token.FLOAT),
	"IMAG":        Token(token.IMAG),
	"CHAR":        Token(token.CHAR),
	"STRING":      Token(token.STRING),
	"+":           Token(token.ADD),
	"-":           Token(token.SUB),
	"*":           Token(token.MUL),
	"/":           Token(token.QUO),
	"%":           Token(token.REM),
	"&":           Token(token.AND),
	"|":           Token(token.OR),
	"^":           Token(token.XOR),
	"<<":          Token(token.SHL),
	">>":          Token(token.SHR),
	"&^":          Token(token.AND_NOT),
	"+=":          Token(token.ADD_ASSIGN),
	"-=":          Token(token.SUB_ASSIGN),
	"*=":          Token(token.MUL_ASSIGN),
	"/=":          Token(token.QUO_ASSIGN),
	"%=":          Token(token.REM_ASSIGN),
	"&=":          Token(token.AND_ASSIGN),
	"|=":          Token(token.OR_ASSIGN),
	"^=":          Token(token.XOR_ASSIGN),
	"<<=":         Token(token.SHL_ASSIGN),
	">>=":         Token(token.SHR_ASSIGN),
	"&^=":         Token(token.AND_NOT_ASSIGN),
	"&&":          Token(token.LAND),
	"||":          Token(token.LOR),
	"<-":          Token(token.ARROW),
	"++":          Token(token.INC),
	"--":          Token(token.DEC),
	"==":          Token(token.EQL),
	"<":           Token(token.LSS),
	">":           Token(token.GTR),
	"=":           Token(token.ASSIGN),
	"!":           Token(token.NOT),
	"!=":          Token(token.NEQ),
	"<=":          Token(token.LEQ),
	">=":          Token(token.GEQ),
	":=":          Token(token.DEFINE),
	"...":         Token(token.ELLIPSIS),
	"IMPORT":      Token(token.IMPORT),
	"VAR":         Token(token.VAR),
	"TYPE":        Token(token.TYPE),
	"CONST":       Token(token.CONST),
	"BREAK":       Token(token.BREAK),
	"CONTINUE":    Token(token.CONTINUE),
	"GOTO":        Token(token.GOTO),
	"FALLTHROUGH": Token(token.FALLTHROUGH),
}
// maybeToken converts node to a Token if it is a String whose text
// spells a Go token; otherwise it returns node unchanged. The boolean
// reports whether a conversion happened.
func maybeToken(node Node) (Node, bool) {
	s, ok := node.(String)
	if !ok {
		return node, false
	}
	tok, ok := tokensByString[string(s)]
	if !ok {
		return node, false
	}
	return tok, true
}
// isNil reports whether v is either an untyped nil or the pattern
// language's Nil node.
func isNil(v interface{}) bool {
	switch v.(type) {
	case nil, Nil:
		return true
	default:
		return false
	}
}
// matcher is implemented by pattern nodes with custom matching logic,
// as opposed to nodes that are compared purely structurally.
type matcher interface {
	Match(*Matcher, interface{}) (interface{}, bool)
}

// State maps binding names to the values bound during a match.
type State = map[string]interface{}

// Matcher matches a pattern against an AST, accumulating bindings in
// State. TypesInfo is required only for type-aware nodes such as
// Function, Builtin and Object.
type Matcher struct {
	TypesInfo *types.Info
	State     State
}
// fork returns a new Matcher sharing m's type information but holding
// an independent copy of the binding state, so speculative matches
// (e.g. the alternatives of Or) don't pollute m.
func (m *Matcher) fork() *Matcher {
	clone := &Matcher{
		TypesInfo: m.TypesInfo,
		State:     make(State, len(m.State)),
	}
	for name, val := range m.State {
		clone.State[name] = val
	}
	return clone
}
// merge adopts the binding state of a forked matcher after its
// speculative match succeeded.
func (m *Matcher) merge(mc *Matcher) {
	m.State = mc.State
}

// Match reports whether pattern a matches AST node b, resetting the
// binding state first. On success, m.State holds the bindings
// established by the match.
func (m *Matcher) Match(a Node, b ast.Node) bool {
	m.State = State{}
	_, ok := match(m, a, b)
	return ok
}
// Match matches pattern a against AST node b with a fresh Matcher,
// returning that Matcher (whose State holds any bindings) and whether
// the match succeeded. Note that the Matcher has no type information,
// so type-aware pattern nodes cannot be used with it.
func Match(a Node, b ast.Node) (*Matcher, bool) {
	m := new(Matcher)
	ok := m.Match(a, b)
	return m, ok
}
// Match two items, which may be (Node, AST) or (AST, AST)
//
// match is the core dispatcher. The left side may be a pattern node or
// an AST value; the right side is always a concrete AST value (an
// ast.Node, a slice, a token, a string, ...). It returns the matched
// value and whether the match succeeded.
func match(m *Matcher, l, r interface{}) (interface{}, bool) {
	if _, ok := r.(Node); ok {
		panic("Node mustn't be on right side of match")
	}

	// Transparently unwrap AST wrapper nodes on the left so patterns
	// needn't spell out ExprStmt, ParenExpr, DeclStmt, etc.
	switch l := l.(type) {
	case *ast.ParenExpr:
		return match(m, l.X, r)
	case *ast.ExprStmt:
		return match(m, l.X, r)
	case *ast.DeclStmt:
		return match(m, l.Decl, r)
	case *ast.LabeledStmt:
		return match(m, l.Stmt, r)
	case *ast.BlockStmt:
		return match(m, l.List, r)
	case *ast.FieldList:
		return match(m, l.List, r)
	}

	// The same unwrapping on the right; nil wrappers collapse to nil.
	switch r := r.(type) {
	case *ast.ParenExpr:
		return match(m, l, r.X)
	case *ast.ExprStmt:
		return match(m, l, r.X)
	case *ast.DeclStmt:
		return match(m, l, r.Decl)
	case *ast.LabeledStmt:
		return match(m, l, r.Stmt)
	case *ast.BlockStmt:
		if r == nil {
			return match(m, l, nil)
		}
		return match(m, l, r.List)
	case *ast.FieldList:
		if r == nil {
			return match(m, l, nil)
		}
		return match(m, l, r.List)
	case *ast.BasicLit:
		if r == nil {
			return match(m, l, nil)
		}
	}

	// Pattern nodes with custom matching behavior (Binding, Or, ...)
	if l, ok := l.(matcher); ok {
		return l.Match(m, r)
	}

	if l, ok := l.(Node); ok {
		// Matching of pattern with concrete value
		return matchNodeAST(m, l, r)
	}

	if l == nil || r == nil {
		return nil, l == r
	}

	{
		ln, ok1 := l.(ast.Node)
		rn, ok2 := r.(ast.Node)
		if ok1 && ok2 {
			return matchAST(m, ln, rn)
		}
	}
	{
		// a bound types.Object is recalled against the object an
		// identifier or selector resolves to
		obj, ok := l.(types.Object)
		if ok {
			switch r := r.(type) {
			case *ast.Ident:
				return obj, obj == m.TypesInfo.ObjectOf(r)
			case *ast.SelectorExpr:
				return obj, obj == m.TypesInfo.ObjectOf(r.Sel)
			default:
				return obj, false
			}
		}
	}
	// The remaining cases compare slices element-wise; a single
	// element on either side is promoted to a one-element slice.
	{
		ln, ok1 := l.([]ast.Expr)
		rn, ok2 := r.([]ast.Expr)
		if ok1 || ok2 {
			if ok1 && !ok2 {
				rn = []ast.Expr{r.(ast.Expr)}
			} else if !ok1 && ok2 {
				ln = []ast.Expr{l.(ast.Expr)}
			}
			if len(ln) != len(rn) {
				return nil, false
			}
			for i, ll := range ln {
				if _, ok := match(m, ll, rn[i]); !ok {
					return nil, false
				}
			}
			return r, true
		}
	}
	{
		ln, ok1 := l.([]ast.Stmt)
		rn, ok2 := r.([]ast.Stmt)
		if ok1 || ok2 {
			if ok1 && !ok2 {
				rn = []ast.Stmt{r.(ast.Stmt)}
			} else if !ok1 && ok2 {
				ln = []ast.Stmt{l.(ast.Stmt)}
			}
			if len(ln) != len(rn) {
				return nil, false
			}
			for i, ll := range ln {
				if _, ok := match(m, ll, rn[i]); !ok {
					return nil, false
				}
			}
			return r, true
		}
	}
	{
		ln, ok1 := l.([]*ast.Field)
		rn, ok2 := r.([]*ast.Field)
		if ok1 || ok2 {
			if ok1 && !ok2 {
				rn = []*ast.Field{r.(*ast.Field)}
			} else if !ok1 && ok2 {
				ln = []*ast.Field{l.(*ast.Field)}
			}
			if len(ln) != len(rn) {
				return nil, false
			}
			for i, ll := range ln {
				if _, ok := match(m, ll, rn[i]); !ok {
					return nil, false
				}
			}
			return r, true
		}
	}
	panic(fmt.Sprintf("unsupported comparison: %T and %T", l, r))
}
// Match a Node with an AST node
//
// matchNodeAST matches pattern node a against concrete value b, which
// may be an AST node or a slice of statements/expressions (a single
// pattern node matches a slice of exactly one element). For AST nodes,
// the pattern's fields are matched one by one against the AST node's
// identically named fields.
//
// Fix: the panic's format string used %t (the boolean verb) for the
// value b; %T (dynamic type) is what's intended and what the trailing
// "when comparing with %T" already uses for a.
func matchNodeAST(m *Matcher, a Node, b interface{}) (interface{}, bool) {
	switch b := b.(type) {
	case []ast.Stmt:
		// 'a' is not a List or we'd be using its Match
		// implementation.
		if len(b) != 1 {
			return nil, false
		}
		return match(m, a, b[0])
	case []ast.Expr:
		// 'a' is not a List or we'd be using its Match
		// implementation.
		if len(b) != 1 {
			return nil, false
		}
		return match(m, a, b[0])
	case ast.Node:
		ra := reflect.ValueOf(a)
		rb := reflect.ValueOf(b).Elem()
		if ra.Type().Name() != rb.Type().Name() {
			return nil, false
		}
		for i := 0; i < ra.NumField(); i++ {
			af := ra.Field(i)
			fieldName := ra.Type().Field(i).Name
			bf := rb.FieldByName(fieldName)
			if (bf == reflect.Value{}) {
				panic(fmt.Sprintf("internal error: could not find field %s in type %T when comparing with %T", fieldName, b, a))
			}
			ai := af.Interface()
			bi := bf.Interface()
			if ai == nil {
				// NOTE(review): this returns immediately instead of
				// continuing with the remaining fields; presumably a
				// nil pattern field can only occur alone — confirm.
				return b, bi == nil
			}
			if _, ok := match(m, ai.(Node), bi); !ok {
				return b, false
			}
		}
		return b, true
	case nil:
		return nil, a == Nil{}
	default:
		panic(fmt.Sprintf("unhandled type %T", b))
	}
}
// Match two AST nodes
//
// matchAST structurally compares two concrete AST nodes of the same
// dynamic type, field by field, ignoring positions, ast.Object
// back-links and comments.
func matchAST(m *Matcher, a, b ast.Node) (interface{}, bool) {
	ra := reflect.ValueOf(a)
	rb := reflect.ValueOf(b)

	if ra.Type() != rb.Type() {
		return nil, false
	}
	if ra.IsNil() || rb.IsNil() {
		// both nil is a match; exactly one nil is not
		return rb, ra.IsNil() == rb.IsNil()
	}

	ra = ra.Elem()
	rb = rb.Elem()
	for i := 0; i < ra.NumField(); i++ {
		af := ra.Field(i)
		bf := rb.Field(i)
		if af.Type() == rtTokPos || af.Type() == rtObject || af.Type() == rtCommentGroup {
			// positions, object links and comments don't affect
			// structural equality
			continue
		}

		switch af.Kind() {
		case reflect.Slice:
			if af.Len() != bf.Len() {
				return nil, false
			}
			for j := 0; j < af.Len(); j++ {
				if _, ok := match(m, af.Index(j).Interface().(ast.Node), bf.Index(j).Interface().(ast.Node)); !ok {
					return nil, false
				}
			}
		case reflect.String:
			if af.String() != bf.String() {
				return nil, false
			}
		case reflect.Int:
			if af.Int() != bf.Int() {
				return nil, false
			}
		case reflect.Bool:
			if af.Bool() != bf.Bool() {
				return nil, false
			}
		case reflect.Ptr, reflect.Interface:
			if _, ok := match(m, af.Interface(), bf.Interface()); !ok {
				return nil, false
			}
		default:
			panic(fmt.Sprintf("internal error: unhandled kind %s (%T)", af.Kind(), af.Interface()))
		}
	}
	return b, true
}
// Match resolves a binding. A binding without a pattern either recalls
// a previously bound value of the same name, or matches anything and
// records it. A binding with a pattern matches that pattern and
// records the result; rebinding an already-bound name is a programmer
// error and panics.
func (b Binding) Match(m *Matcher, node interface{}) (interface{}, bool) {
	if isNil(b.Node) {
		if prev, ok := m.State[b.Name]; ok {
			// Recall value
			return match(m, prev, node)
		}
		// Matching anything
		b.Node = Any{}
	}

	// Store value
	if _, ok := m.State[b.Name]; ok {
		panic(fmt.Sprintf("binding already created: %s", b.Name))
	}
	bound, ok := match(m, b.Node, node)
	if ok {
		m.State[b.Name] = bound
	}
	return bound, ok
}
// Match matches any value unconditionally.
func (Any) Match(m *Matcher, node interface{}) (interface{}, bool) {
	return node, true
}
// Match matches a List pattern against a slice. An empty list (nil
// head) matches only an empty slice; otherwise the head must match the
// slice's first element and the tail its remainder. Non-slice values,
// including untyped nil, never match.
func (l List) Match(m *Matcher, node interface{}) (interface{}, bool) {
	v := reflect.ValueOf(node)
	if v.Kind() == reflect.Slice {
		if isNil(l.Head) {
			// empty list
			return node, v.Len() == 0
		}
		if v.Len() == 0 {
			return nil, false
		}
		// OPT(dh): don't check the entire tail if head didn't match
		_, ok1 := match(m, l.Head, v.Index(0).Interface())
		_, ok2 := match(m, l.Tail, v.Slice(1, v.Len()).Interface())
		return node, ok1 && ok2
	}
	// Our empty list does not equal an untyped Go nil. This way, we can
	// tell apart an if with no else and an if with an empty else.
	return nil, false
}
// Match matches a String atom against a plain string, a token (when
// the string spells a Go token), or a constant's value.
func (s String) Match(m *Matcher, node interface{}) (interface{}, bool) {
	switch concrete := node.(type) {
	case token.Token:
		tok, ok := maybeToken(s)
		if !ok {
			return nil, false
		}
		return match(m, tok, node)
	case string:
		return concrete, concrete == string(s)
	case types.TypeAndValue:
		if concrete.Value == nil {
			return concrete, false
		}
		return concrete, concrete.Value.String() == string(s)
	default:
		return nil, false
	}
}
// Match matches a Token atom against a concrete go/token value.
func (tok Token) Match(m *Matcher, node interface{}) (interface{}, bool) {
	if concrete, ok := node.(token.Token); ok {
		return concrete, concrete == token.Token(tok)
	}
	return nil, false
}
// Match matches the absence of a value: an untyped nil, a Nil node, or
// a nil value of a nillable type.
// NOTE(review): reflect.ValueOf(node).IsNil() panics for non-nillable
// kinds (e.g. token.Token, string); presumably Nil is only ever
// matched against pointers, interfaces and slices — confirm.
func (Nil) Match(m *Matcher, node interface{}) (interface{}, bool) {
	return nil, isNil(node) || reflect.ValueOf(node).IsNil()
}
// Match matches an identifier whose object is the pre-declared
// (universe-scope) object of the same name, i.e. a use of a built-in
// that hasn't been shadowed.
func (builtin Builtin) Match(m *Matcher, node interface{}) (interface{}, bool) {
	matched, ok := match(m, Ident(builtin), node)
	if !ok {
		return nil, false
	}
	ident := matched.(*ast.Ident)
	if m.TypesInfo.ObjectOf(ident) != types.Universe.Lookup(ident.Name) {
		return nil, false
	}
	return ident, true
}
// Match matches an identifier by name but yields the types.Object it
// resolves to, so bindings capture the object rather than the ident.
func (obj Object) Match(m *Matcher, node interface{}) (interface{}, bool) {
	matched, ok := match(m, Ident(obj), node)
	if !ok {
		return nil, false
	}
	ident := matched.(*ast.Ident)
	resolved := m.TypesInfo.ObjectOf(ident)
	if _, ok := match(m, obj.Name, ident.Name); !ok {
		return resolved, false
	}
	return resolved, true
}
// Match matches identifiers and selector expressions — optionally with
// type arguments, i.e. instantiated generics — that resolve to a
// function, built-in, or type whose fully qualified name matches
// fn.Name. It yields the resolved types.Object.
func (fn Function) Match(m *Matcher, node interface{}) (interface{}, bool) {
	var name string
	var obj types.Object

	base := []Node{
		Ident{Any{}},
		SelectorExpr{Any{}, Any{}},
	}
	// also accept f[T] and f[T1, T2] forms wrapping an ident/selector
	p := Or{
		Nodes: append(base,
			IndexExpr{Or{Nodes: base}, Any{}},
			IndexListExpr{Or{Nodes: base}, Any{}})}

	r, ok := match(m, p, node)
	if !ok {
		return nil, false
	}
	fun := r

	// strip the index expression to get at the underlying
	// identifier or selector
	switch idx := fun.(type) {
	case *ast.IndexExpr:
		fun = idx.X
	case *typeparams.IndexListExpr:
		fun = idx.X
	}

	switch fun := fun.(type) {
	case *ast.Ident:
		obj = m.TypesInfo.ObjectOf(fun)
		switch obj := obj.(type) {
		case *types.Func:
			// OPT(dh): optimize this similar to code.FuncName
			name = obj.FullName()
		case *types.Builtin:
			name = obj.Name()
		case *types.TypeName:
			name = types.TypeString(obj.Type(), nil)
		default:
			return nil, false
		}
	case *ast.SelectorExpr:
		obj = m.TypesInfo.ObjectOf(fun.Sel)
		switch obj := obj.(type) {
		case *types.Func:
			// OPT(dh): optimize this similar to code.FuncName
			name = obj.FullName()
		case *types.TypeName:
			name = types.TypeString(obj.Type(), nil)
		default:
			return nil, false
		}
	default:
		// the pattern p above only produces idents and selectors
		panic("unreachable")
	}

	_, ok = match(m, fn.Name, name)
	return obj, ok
}
// Match tries each alternative in order and returns the first success.
// Each attempt runs on a forked matcher so that bindings from failed
// alternatives are discarded; a successful attempt's bindings are
// merged back.
func (or Or) Match(m *Matcher, node interface{}) (interface{}, bool) {
	for _, alternative := range or.Nodes {
		speculative := m.fork()
		ret, ok := match(speculative, alternative, node)
		if !ok {
			continue
		}
		m.merge(speculative)
		return ret, true
	}
	return nil, false
}
// Match negates the wrapped pattern: it succeeds (yielding node) when
// the wrapped pattern fails, and vice versa.
func (not Not) Match(m *Matcher, node interface{}) (interface{}, bool) {
	if _, ok := match(m, not.Node, node); ok {
		return nil, false
	}
	return node, true
}
// integerLiteralQ matches integer literals, including signed forms
// written as unary + or - applied to an integer literal.
var integerLiteralQ = MustParse(`(Or (BasicLit "INT" _) (UnaryExpr (Or "+" "-") (IntegerLiteral _)))`)
// Match matches an integer literal expression (possibly signed) whose
// constant value matches lit.Value, yielding the matched expression.
func (lit IntegerLiteral) Match(m *Matcher, node interface{}) (interface{}, bool) {
	matched, ok := match(m, integerLiteralQ.Root, node)
	if !ok {
		return nil, false
	}
	tv, ok := m.TypesInfo.Types[matched.(ast.Expr)]
	if !ok || tv.Value == nil {
		return nil, false
	}
	if _, ok := match(m, lit.Value, tv); !ok {
		return matched, false
	}
	return matched, true
}
// Match matches a constant expression that is spelled entirely out of
// literals — it contains no identifiers at all, not even references to
// named constants — and whose value matches texpr.Value.
func (texpr TrulyConstantExpression) Match(m *Matcher, node interface{}) (interface{}, bool) {
	expr, ok := node.(ast.Expr)
	if !ok {
		return nil, false
	}
	tv, ok := m.TypesInfo.Types[expr]
	if !ok || tv.Value == nil {
		return nil, false
	}
	literalOnly := true
	ast.Inspect(expr, func(n ast.Node) bool {
		if _, isIdent := n.(*ast.Ident); isIdent {
			literalOnly = false
			return false
		}
		return true
	})
	if !literalOnly {
		return nil, false
	}
	if _, ok := match(m, texpr.Value, tv); !ok {
		return expr, false
	}
	return expr, true
}
var (
	// Types of fields in go/ast structs that we want to skip
	rtTokPos       = reflect.TypeOf(token.Pos(0))
	rtObject       = reflect.TypeOf((*ast.Object)(nil))
	rtCommentGroup = reflect.TypeOf((*ast.CommentGroup)(nil))
)

// Compile-time assertions that all pattern node types with custom
// matching behavior implement the matcher interface.
var (
	_ matcher = Binding{}
	_ matcher = Any{}
	_ matcher = List{}
	_ matcher = String("")
	_ matcher = Token(0)
	_ matcher = Nil{}
	_ matcher = Builtin{}
	_ matcher = Object{}
	_ matcher = Function{}
	_ matcher = Or{}
	_ matcher = Not{}
	_ matcher = IntegerLiteral{}
	_ matcher = TrulyConstantExpression{}
)

View File

@@ -0,0 +1,467 @@
package pattern
import (
"fmt"
"go/ast"
"go/token"
"reflect"
)
// Pattern is a parsed pattern, ready to be matched against ASTs.
type Pattern struct {
	Root Node
	// Relevant contains instances of ast.Node that could potentially
	// initiate a successful match of the pattern.
	Relevant []reflect.Type
}
// MustParse parses pattern s with type-aware nodes allowed and panics
// if parsing fails. It is intended for patterns defined at package
// scope.
func MustParse(s string) Pattern {
	parser := &Parser{AllowTypeInfo: true}
	pattern, err := parser.Parse(s)
	if err != nil {
		panic(err)
	}
	return pattern
}
// roots computes the set of go/ast node types at which a successful
// match of node could be rooted; it is used to pre-filter candidate
// AST nodes before attempting a full match.
func roots(node Node) []reflect.Type {
	switch node := node.(type) {
	case Or:
		var candidates []reflect.Type
		for _, alternative := range node.Nodes {
			candidates = append(candidates, roots(alternative)...)
		}
		return candidates
	case Not:
		return roots(node.Node)
	case Binding:
		return roots(node.Node)
	case Nil, nil:
		// this branch is reached via bindings
		return allTypes
	default:
		candidates, ok := nodeToASTTypes[reflect.TypeOf(node)]
		if !ok {
			panic(fmt.Sprintf("internal error: unhandled type %T", node))
		}
		return candidates
	}
}
// allTypes lists every go/ast node type the pattern language can match;
// it is the root set for nodes that could match anything (Any, open
// bindings, TrulyConstantExpression).
var allTypes = []reflect.Type{
	reflect.TypeOf((*ast.RangeStmt)(nil)),
	reflect.TypeOf((*ast.AssignStmt)(nil)),
	reflect.TypeOf((*ast.IndexExpr)(nil)),
	reflect.TypeOf((*ast.Ident)(nil)),
	reflect.TypeOf((*ast.ValueSpec)(nil)),
	reflect.TypeOf((*ast.GenDecl)(nil)),
	reflect.TypeOf((*ast.BinaryExpr)(nil)),
	reflect.TypeOf((*ast.ForStmt)(nil)),
	reflect.TypeOf((*ast.ArrayType)(nil)),
	reflect.TypeOf((*ast.DeferStmt)(nil)),
	reflect.TypeOf((*ast.MapType)(nil)),
	reflect.TypeOf((*ast.ReturnStmt)(nil)),
	reflect.TypeOf((*ast.SliceExpr)(nil)),
	reflect.TypeOf((*ast.StarExpr)(nil)),
	reflect.TypeOf((*ast.UnaryExpr)(nil)),
	reflect.TypeOf((*ast.SendStmt)(nil)),
	reflect.TypeOf((*ast.SelectStmt)(nil)),
	reflect.TypeOf((*ast.ImportSpec)(nil)),
	reflect.TypeOf((*ast.IfStmt)(nil)),
	reflect.TypeOf((*ast.GoStmt)(nil)),
	reflect.TypeOf((*ast.Field)(nil)),
	reflect.TypeOf((*ast.SelectorExpr)(nil)),
	reflect.TypeOf((*ast.StructType)(nil)),
	reflect.TypeOf((*ast.KeyValueExpr)(nil)),
	reflect.TypeOf((*ast.FuncType)(nil)),
	reflect.TypeOf((*ast.FuncLit)(nil)),
	reflect.TypeOf((*ast.FuncDecl)(nil)),
	reflect.TypeOf((*ast.ChanType)(nil)),
	reflect.TypeOf((*ast.CallExpr)(nil)),
	reflect.TypeOf((*ast.CaseClause)(nil)),
	reflect.TypeOf((*ast.CommClause)(nil)),
	reflect.TypeOf((*ast.CompositeLit)(nil)),
	reflect.TypeOf((*ast.EmptyStmt)(nil)),
	reflect.TypeOf((*ast.SwitchStmt)(nil)),
	reflect.TypeOf((*ast.TypeSwitchStmt)(nil)),
	reflect.TypeOf((*ast.TypeAssertExpr)(nil)),
	reflect.TypeOf((*ast.TypeSpec)(nil)),
	reflect.TypeOf((*ast.InterfaceType)(nil)),
	reflect.TypeOf((*ast.BranchStmt)(nil)),
	reflect.TypeOf((*ast.IncDecStmt)(nil)),
	reflect.TypeOf((*ast.BasicLit)(nil)),
}
// nodeToASTTypes maps each pattern node type to the go/ast node types
// that a successful match can be rooted at. String and Token map to
// nil: they never root a match on their own.
var nodeToASTTypes = map[reflect.Type][]reflect.Type{
	reflect.TypeOf(String("")):                nil,
	reflect.TypeOf(Token(0)):                  nil,
	reflect.TypeOf(List{}):                    {reflect.TypeOf((*ast.BlockStmt)(nil)), reflect.TypeOf((*ast.FieldList)(nil))},
	reflect.TypeOf(Builtin{}):                 {reflect.TypeOf((*ast.Ident)(nil))},
	reflect.TypeOf(Object{}):                  {reflect.TypeOf((*ast.Ident)(nil))},
	reflect.TypeOf(Function{}):                {reflect.TypeOf((*ast.Ident)(nil)), reflect.TypeOf((*ast.SelectorExpr)(nil))},
	reflect.TypeOf(Any{}):                     allTypes,
	reflect.TypeOf(RangeStmt{}):               {reflect.TypeOf((*ast.RangeStmt)(nil))},
	reflect.TypeOf(AssignStmt{}):              {reflect.TypeOf((*ast.AssignStmt)(nil))},
	reflect.TypeOf(IndexExpr{}):               {reflect.TypeOf((*ast.IndexExpr)(nil))},
	reflect.TypeOf(Ident{}):                   {reflect.TypeOf((*ast.Ident)(nil))},
	reflect.TypeOf(ValueSpec{}):               {reflect.TypeOf((*ast.ValueSpec)(nil))},
	reflect.TypeOf(GenDecl{}):                 {reflect.TypeOf((*ast.GenDecl)(nil))},
	reflect.TypeOf(BinaryExpr{}):              {reflect.TypeOf((*ast.BinaryExpr)(nil))},
	reflect.TypeOf(ForStmt{}):                 {reflect.TypeOf((*ast.ForStmt)(nil))},
	reflect.TypeOf(ArrayType{}):               {reflect.TypeOf((*ast.ArrayType)(nil))},
	reflect.TypeOf(DeferStmt{}):               {reflect.TypeOf((*ast.DeferStmt)(nil))},
	reflect.TypeOf(MapType{}):                 {reflect.TypeOf((*ast.MapType)(nil))},
	reflect.TypeOf(ReturnStmt{}):              {reflect.TypeOf((*ast.ReturnStmt)(nil))},
	reflect.TypeOf(SliceExpr{}):               {reflect.TypeOf((*ast.SliceExpr)(nil))},
	reflect.TypeOf(StarExpr{}):                {reflect.TypeOf((*ast.StarExpr)(nil))},
	reflect.TypeOf(UnaryExpr{}):               {reflect.TypeOf((*ast.UnaryExpr)(nil))},
	reflect.TypeOf(SendStmt{}):                {reflect.TypeOf((*ast.SendStmt)(nil))},
	reflect.TypeOf(SelectStmt{}):              {reflect.TypeOf((*ast.SelectStmt)(nil))},
	reflect.TypeOf(ImportSpec{}):              {reflect.TypeOf((*ast.ImportSpec)(nil))},
	reflect.TypeOf(IfStmt{}):                  {reflect.TypeOf((*ast.IfStmt)(nil))},
	reflect.TypeOf(GoStmt{}):                  {reflect.TypeOf((*ast.GoStmt)(nil))},
	reflect.TypeOf(Field{}):                   {reflect.TypeOf((*ast.Field)(nil))},
	reflect.TypeOf(SelectorExpr{}):            {reflect.TypeOf((*ast.SelectorExpr)(nil))},
	reflect.TypeOf(StructType{}):              {reflect.TypeOf((*ast.StructType)(nil))},
	reflect.TypeOf(KeyValueExpr{}):            {reflect.TypeOf((*ast.KeyValueExpr)(nil))},
	reflect.TypeOf(FuncType{}):                {reflect.TypeOf((*ast.FuncType)(nil))},
	reflect.TypeOf(FuncLit{}):                 {reflect.TypeOf((*ast.FuncLit)(nil))},
	reflect.TypeOf(FuncDecl{}):                {reflect.TypeOf((*ast.FuncDecl)(nil))},
	reflect.TypeOf(ChanType{}):                {reflect.TypeOf((*ast.ChanType)(nil))},
	reflect.TypeOf(CallExpr{}):                {reflect.TypeOf((*ast.CallExpr)(nil))},
	reflect.TypeOf(CaseClause{}):              {reflect.TypeOf((*ast.CaseClause)(nil))},
	reflect.TypeOf(CommClause{}):              {reflect.TypeOf((*ast.CommClause)(nil))},
	reflect.TypeOf(CompositeLit{}):            {reflect.TypeOf((*ast.CompositeLit)(nil))},
	reflect.TypeOf(EmptyStmt{}):               {reflect.TypeOf((*ast.EmptyStmt)(nil))},
	reflect.TypeOf(SwitchStmt{}):              {reflect.TypeOf((*ast.SwitchStmt)(nil))},
	reflect.TypeOf(TypeSwitchStmt{}):          {reflect.TypeOf((*ast.TypeSwitchStmt)(nil))},
	reflect.TypeOf(TypeAssertExpr{}):          {reflect.TypeOf((*ast.TypeAssertExpr)(nil))},
	reflect.TypeOf(TypeSpec{}):                {reflect.TypeOf((*ast.TypeSpec)(nil))},
	reflect.TypeOf(InterfaceType{}):           {reflect.TypeOf((*ast.InterfaceType)(nil))},
	reflect.TypeOf(BranchStmt{}):              {reflect.TypeOf((*ast.BranchStmt)(nil))},
	reflect.TypeOf(IncDecStmt{}):              {reflect.TypeOf((*ast.IncDecStmt)(nil))},
	reflect.TypeOf(BasicLit{}):                {reflect.TypeOf((*ast.BasicLit)(nil))},
	reflect.TypeOf(IntegerLiteral{}):          {reflect.TypeOf((*ast.BasicLit)(nil)), reflect.TypeOf((*ast.UnaryExpr)(nil))},
	reflect.TypeOf(TrulyConstantExpression{}): allTypes, // this is an over-approximation, which is fine
}
// requiresTypeInfo lists the pattern node types that can only be
// matched when type information is available; populateNode rejects
// them unless Parser.AllowTypeInfo is set.
var requiresTypeInfo = map[string]bool{
	"Function": true,
	"Builtin":  true,
	"Object":   true,
}
// A Parser parses the textual representation of a pattern into a
// Pattern. Parse resets all internal state, so a single Parser may be
// reused for multiple patterns.
type Parser struct {
	// Allow nodes that rely on type information
	AllowTypeInfo bool

	// lex tokenizes the input; items delivers its tokens.
	lex *lexer
	// cur is the most recently consumed item.
	cur item
	// last, when non-nil, is a single item of pushback (see rewind).
	last *item
	items chan item
}
// Parse parses the pattern in s and returns the resulting Pattern.
// It reports an error for lexing failures, malformed patterns, and
// trailing input after the root node.
func (p *Parser) Parse(s string) (Pattern, error) {
	// Reset parser state so the Parser can be reused.
	p.cur = item{}
	p.last = nil
	p.items = nil

	fset := token.NewFileSet()
	p.lex = &lexer{
		f: fset.AddFile("<input>", -1, len(s)),
		input: s,
		items: make(chan item),
	}
	// The lexer runs concurrently and delivers tokens over its channel.
	go p.lex.run()
	p.items = p.lex.items
	root, err := p.node()
	if err != nil {
		// drain lexer if parsing failed
		for range p.lex.items {
		}
		return Pattern{}, err
	}
	// The root node must consume the entire input.
	if item := <-p.lex.items; item.typ != itemEOF {
		return Pattern{}, fmt.Errorf("unexpected token %s after end of pattern", item.typ)
	}
	return Pattern{
		Root: root,
		Relevant: roots(root),
	}, nil
}
// next returns the next item, preferring a previously rewound item
// over reading from the lexer. Once the lexer's channel is closed it
// keeps returning an eof item.
func (p *Parser) next() item {
	if p.last != nil {
		buffered := *p.last
		p.last = nil
		return buffered
	}
	cur, ok := <-p.items
	if !ok {
		cur = item{typ: eof}
	}
	p.cur = cur
	return p.cur
}
// rewind pushes the current item back so that the next call to next
// returns it again. Only a single item of lookahead is supported.
func (p *Parser) rewind() {
	p.last = &p.cur
}
// peek returns the upcoming item without consuming it.
func (p *Parser) peek() item {
	upcoming := p.next()
	p.rewind()
	return upcoming
}
// accept consumes and returns the next item if it has type typ.
// Otherwise it leaves the input untouched and reports false.
func (p *Parser) accept(typ itemType) (item, bool) {
	if tok := p.next(); tok.typ == typ {
		return tok, true
	}
	p.rewind()
	return item{}, false
}
// unexpectedToken returns an error describing the current token as
// unexpected, naming what would have been valid in its place. Lexer
// errors are reported as-is.
func (p *Parser) unexpectedToken(valid string) error {
	tok := p.cur
	if tok.typ == itemError {
		return fmt.Errorf("error lexing input: %s", tok.val)
	}
	// Tokens that carry a payload are shown verbatim; all others are
	// shown as their quoted token name.
	got := "'" + tok.typ.String() + "'"
	switch tok.typ {
	case itemTypeName, itemVariable, itemString:
		got = tok.val
	}
	pos := p.lex.f.Position(token.Pos(tok.pos))
	return fmt.Errorf("%s: expected %s, found %s", pos, valid, got)
}
// node parses a parenthesized node of the form
// '(' TypeName Object* ')' and instantiates it via populateNode.
func (p *Parser) node() (Node, error) {
	if _, ok := p.accept(itemLeftParen); !ok {
		return nil, p.unexpectedToken("'('")
	}
	name, ok := p.accept(itemTypeName)
	if !ok {
		return nil, p.unexpectedToken("Node type")
	}
	var args []Node
	for {
		// The argument list ends at the closing parenthesis.
		if _, ok := p.accept(itemRightParen); ok {
			return p.populateNode(name.val, args)
		}
		p.rewind()
		arg, err := p.object()
		if err != nil {
			return nil, err
		}
		args = append(args, arg)
	}
}
// populateNode instantiates the pattern node registered under typ and
// initializes its fields from objs.
//
// A node whose single field is a slice (e.g. Or) is variadic and
// absorbs all of objs. For every other node, len(objs) must equal the
// number of struct fields; string fields (currently only Binding.Name)
// must be given a String object. Nodes listed in requiresTypeInfo are
// rejected unless allowTypeInfo is true.
func populateNode(typ string, objs []Node, allowTypeInfo bool) (Node, error) {
	T, ok := structNodes[typ]
	if !ok {
		return nil, fmt.Errorf("unknown node %s", typ)
	}
	if !allowTypeInfo && requiresTypeInfo[typ] {
		return nil, fmt.Errorf("Node %s requires type information", typ)
	}
	pv := reflect.New(T)
	v := pv.Elem()
	if v.NumField() == 1 {
		f := v.Field(0)
		if f.Type().Kind() == reflect.Slice {
			// Variadic node
			f.Set(reflect.AppendSlice(f, reflect.ValueOf(objs)))
			return v.Interface().(Node), nil
		}
	}
	if len(objs) != v.NumField() {
		return nil, fmt.Errorf("tried to initialize node %s with %d values, expected %d", typ, len(objs), v.NumField())
	}
	for i := 0; i < v.NumField(); i++ {
		f := v.Field(i)
		if f.Kind() == reflect.String {
			// String fields cannot hold a Node; require a String object.
			if obj, ok := objs[i].(String); ok {
				f.Set(reflect.ValueOf(string(obj)))
			} else {
				return nil, fmt.Errorf("first argument of (Binding name node) must be string, but got %s", objs[i])
			}
		} else {
			f.Set(reflect.ValueOf(objs[i]))
		}
	}
	return v.Interface().(Node), nil
}
// populateNode constructs the pattern node named typ from objs,
// honoring the parser's AllowTypeInfo setting.
func (p *Parser) populateNode(typ string, objs []Node) (Node, error) {
	return populateNode(typ, objs, p.AllowTypeInfo)
}
// structNodes maps the node names accepted in pattern syntax to the
// reflect.Type used by populateNode to instantiate them.
var structNodes = map[string]reflect.Type{
	"Any": reflect.TypeOf(Any{}),
	"Ellipsis": reflect.TypeOf(Ellipsis{}),
	"List": reflect.TypeOf(List{}),
	"Binding": reflect.TypeOf(Binding{}),
	"RangeStmt": reflect.TypeOf(RangeStmt{}),
	"AssignStmt": reflect.TypeOf(AssignStmt{}),
	"IndexExpr": reflect.TypeOf(IndexExpr{}),
	"Ident": reflect.TypeOf(Ident{}),
	"Builtin": reflect.TypeOf(Builtin{}),
	"ValueSpec": reflect.TypeOf(ValueSpec{}),
	"GenDecl": reflect.TypeOf(GenDecl{}),
	"BinaryExpr": reflect.TypeOf(BinaryExpr{}),
	"ForStmt": reflect.TypeOf(ForStmt{}),
	"ArrayType": reflect.TypeOf(ArrayType{}),
	"DeferStmt": reflect.TypeOf(DeferStmt{}),
	"MapType": reflect.TypeOf(MapType{}),
	"ReturnStmt": reflect.TypeOf(ReturnStmt{}),
	"SliceExpr": reflect.TypeOf(SliceExpr{}),
	"StarExpr": reflect.TypeOf(StarExpr{}),
	"UnaryExpr": reflect.TypeOf(UnaryExpr{}),
	"SendStmt": reflect.TypeOf(SendStmt{}),
	"SelectStmt": reflect.TypeOf(SelectStmt{}),
	"ImportSpec": reflect.TypeOf(ImportSpec{}),
	"IfStmt": reflect.TypeOf(IfStmt{}),
	"GoStmt": reflect.TypeOf(GoStmt{}),
	"Field": reflect.TypeOf(Field{}),
	"SelectorExpr": reflect.TypeOf(SelectorExpr{}),
	"StructType": reflect.TypeOf(StructType{}),
	"KeyValueExpr": reflect.TypeOf(KeyValueExpr{}),
	"FuncType": reflect.TypeOf(FuncType{}),
	"FuncLit": reflect.TypeOf(FuncLit{}),
	"FuncDecl": reflect.TypeOf(FuncDecl{}),
	"ChanType": reflect.TypeOf(ChanType{}),
	"CallExpr": reflect.TypeOf(CallExpr{}),
	"CaseClause": reflect.TypeOf(CaseClause{}),
	"CommClause": reflect.TypeOf(CommClause{}),
	"CompositeLit": reflect.TypeOf(CompositeLit{}),
	"EmptyStmt": reflect.TypeOf(EmptyStmt{}),
	"SwitchStmt": reflect.TypeOf(SwitchStmt{}),
	"TypeSwitchStmt": reflect.TypeOf(TypeSwitchStmt{}),
	"TypeAssertExpr": reflect.TypeOf(TypeAssertExpr{}),
	"TypeSpec": reflect.TypeOf(TypeSpec{}),
	"InterfaceType": reflect.TypeOf(InterfaceType{}),
	"BranchStmt": reflect.TypeOf(BranchStmt{}),
	"IncDecStmt": reflect.TypeOf(IncDecStmt{}),
	"BasicLit": reflect.TypeOf(BasicLit{}),
	"Object": reflect.TypeOf(Object{}),
	"Function": reflect.TypeOf(Function{}),
	"Or": reflect.TypeOf(Or{}),
	"Not": reflect.TypeOf(Not{}),
	"IntegerLiteral": reflect.TypeOf(IntegerLiteral{}),
	"TrulyConstantExpression": reflect.TypeOf(TrulyConstantExpression{}),
}
// object parses a single object: a nested node, an array, a variable
// (optionally a binding via '@'), the blank identifier, or a string
// literal. Any of these may be followed by ':' and another object,
// yielding a List with the object as Head and the rest as Tail.
func (p *Parser) object() (Node, error) {
	n := p.next()
	switch n.typ {
	case itemLeftParen:
		// Nested node, e.g. (Ident "foo").
		p.rewind()
		node, err := p.node()
		if err != nil {
			return node, err
		}
		if p.peek().typ == itemColon {
			p.next()
			tail, err := p.object()
			if err != nil {
				return node, err
			}
			return List{Head: node, Tail: tail}, nil
		}
		return node, nil
	case itemLeftBracket:
		// Array literal, e.g. [a b c].
		p.rewind()
		return p.array()
	case itemVariable:
		v := n
		// The variable "nil" is the literal nil node, not a binding.
		if v.val == "nil" {
			return Nil{}, nil
		}
		var b Binding
		if _, ok := p.accept(itemAt); ok {
			// name@(Node): bind the matched node to the name.
			o, err := p.node()
			if err != nil {
				return nil, err
			}
			b = Binding{
				Name: v.val,
				Node: o,
			}
		} else {
			p.rewind()
			// Bare variable: a binding with no constraining node.
			b = Binding{Name: v.val}
		}
		if p.peek().typ == itemColon {
			p.next()
			tail, err := p.object()
			if err != nil {
				return b, err
			}
			return List{Head: b, Tail: tail}, nil
		}
		return b, nil
	case itemBlank:
		// "_" matches anything.
		if p.peek().typ == itemColon {
			p.next()
			tail, err := p.object()
			if err != nil {
				return Any{}, err
			}
			return List{Head: Any{}, Tail: tail}, nil
		}
		return Any{}, nil
	case itemString:
		return String(n.val), nil
	default:
		return nil, p.unexpectedToken("object")
	}
}
// array parses '[' Object* ']' and returns the elements as a proper
// cons list terminated by the empty List.
func (p *Parser) array() (Node, error) {
	if _, ok := p.accept(itemLeftBracket); !ok {
		return nil, p.unexpectedToken("'['")
	}
	var elems []Node
	for {
		if _, ok := p.accept(itemRightBracket); ok {
			break
		}
		p.rewind()
		elem, err := p.object()
		if err != nil {
			return nil, err
		}
		elems = append(elems, elem)
	}
	// Fold right-to-left so the first element ends up at the head.
	list := List{}
	for i := len(elems) - 1; i >= 0; i-- {
		list = List{Head: elems[i], Tail: list}
	}
	return list, nil
}
/*
Node ::= itemLeftParen itemTypeName Object* itemRightParen
Object ::= Node | Array | Binding | itemVariable | itemBlank | itemString
Array ::= itemLeftBracket Object* itemRightBracket
List ::= Object itemColon Object
Binding ::= itemVariable itemAt Node
*/

View File

@@ -0,0 +1,522 @@
package pattern
import (
"fmt"
"go/token"
"reflect"
"strings"
)
// Compile-time assertions that every pattern type implements Node.
var (
	_ Node = Ellipsis{}
	_ Node = Binding{}
	_ Node = RangeStmt{}
	_ Node = AssignStmt{}
	_ Node = IndexExpr{}
	_ Node = IndexListExpr{}
	_ Node = Ident{}
	_ Node = Builtin{}
	_ Node = String("")
	_ Node = Any{}
	_ Node = ValueSpec{}
	_ Node = List{}
	_ Node = GenDecl{}
	_ Node = BinaryExpr{}
	_ Node = ForStmt{}
	_ Node = ArrayType{}
	_ Node = DeferStmt{}
	_ Node = MapType{}
	_ Node = ReturnStmt{}
	_ Node = SliceExpr{}
	_ Node = StarExpr{}
	_ Node = UnaryExpr{}
	_ Node = SendStmt{}
	_ Node = SelectStmt{}
	_ Node = ImportSpec{}
	_ Node = IfStmt{}
	_ Node = GoStmt{}
	_ Node = Field{}
	_ Node = SelectorExpr{}
	_ Node = StructType{}
	_ Node = KeyValueExpr{}
	_ Node = FuncType{}
	_ Node = FuncLit{}
	_ Node = FuncDecl{}
	_ Node = Token(0)
	_ Node = ChanType{}
	_ Node = CallExpr{}
	_ Node = CaseClause{}
	_ Node = CommClause{}
	_ Node = CompositeLit{}
	_ Node = EmptyStmt{}
	_ Node = SwitchStmt{}
	_ Node = TypeSwitchStmt{}
	_ Node = TypeAssertExpr{}
	_ Node = TypeSpec{}
	_ Node = InterfaceType{}
	_ Node = BranchStmt{}
	_ Node = IncDecStmt{}
	_ Node = BasicLit{}
	_ Node = Nil{}
	_ Node = Object{}
	_ Node = Function{}
	_ Node = Not{}
	_ Node = Or{}
	_ Node = IntegerLiteral{}
	_ Node = TrulyConstantExpression{}
)
// The following types mirror the go/ast node kinds that patterns can
// match. Each field holds a sub-pattern (another Node); fields largely
// correspond to the like-named fields of the go/ast counterpart.

// Function matches a function-like object by name; it is one of the
// nodes listed in requiresTypeInfo.
type Function struct {
	Name Node
}
// Token matches a specific go/token.Token.
type Token token.Token
// Nil matches a nil AST field.
type Nil struct {
}
type Ellipsis struct {
	Elt Node
}
type IncDecStmt struct {
	X Node
	Tok Node
}
type BranchStmt struct {
	Tok Node
	Label Node
}
type InterfaceType struct {
	Methods Node
}
type TypeSpec struct {
	Name Node
	Type Node
}
type TypeAssertExpr struct {
	X Node
	Type Node
}
type TypeSwitchStmt struct {
	Init Node
	Assign Node
	Body Node
}
type SwitchStmt struct {
	Init Node
	Tag Node
	Body Node
}
type EmptyStmt struct {
}
type CompositeLit struct {
	Type Node
	Elts Node
}
type CommClause struct {
	Comm Node
	Body Node
}
type CaseClause struct {
	List Node
	Body Node
}
type CallExpr struct {
	Fun Node
	Args Node
	// XXX handle ellipsis
}
// TODO(dh): add a ChanDir node, and a way of instantiating it.
type ChanType struct {
	Dir Node
	Value Node
}
type FuncDecl struct {
	Recv Node
	Name Node
	Type Node
	Body Node
}
type FuncLit struct {
	Type Node
	Body Node
}
type FuncType struct {
	Params Node
	Results Node
}
type KeyValueExpr struct {
	Key Node
	Value Node
}
type StructType struct {
	Fields Node
}
type SelectorExpr struct {
	X Node
	Sel Node
}
type Field struct {
	Names Node
	Type Node
	Tag Node
}
type GoStmt struct {
	Call Node
}
type IfStmt struct {
	Init Node
	Cond Node
	Body Node
	Else Node
}
type ImportSpec struct {
	Name Node
	Path Node
}
type SelectStmt struct {
	Body Node
}
type ArrayType struct {
	Len Node
	Elt Node
}
type DeferStmt struct {
	Call Node
}
type MapType struct {
	Key Node
	Value Node
}
type ReturnStmt struct {
	Results Node
}
type SliceExpr struct {
	X Node
	Low Node
	High Node
	Max Node
}
type StarExpr struct {
	X Node
}
type UnaryExpr struct {
	Op Node
	X Node
}
type SendStmt struct {
	Chan Node
	Value Node
}
// Binding binds the node matched by Node to Name, so later parts of a
// pattern (or the replacement) can refer to it.
type Binding struct {
	Name string
	Node Node
}
type RangeStmt struct {
	Key Node
	Value Node
	Tok Node
	X Node
	Body Node
}
type AssignStmt struct {
	Lhs Node
	Tok Node
	Rhs Node
}
type IndexExpr struct {
	X Node
	Index Node
}
type IndexListExpr struct {
	X Node
	Indices Node
}
// Node is implemented by every pattern element.
type Node interface {
	String() string
	isNode()
}
type Ident struct {
	Name Node
}
// Object matches any named object; requires type information.
type Object struct {
	Name Node
}
// Builtin matches a call to a builtin; requires type information.
type Builtin struct {
	Name Node
}
// String matches a string literal or identifier name verbatim.
type String string
// Any matches any node; written "_" in pattern syntax.
type Any struct{}
type ValueSpec struct {
	Names Node
	Type Node
	Values Node
}
// List is a cons cell; a proper list terminates in the empty List.
type List struct {
	Head Node
	Tail Node
}
type GenDecl struct {
	Tok Node
	Specs Node
}
type BasicLit struct {
	Kind Node
	Value Node
}
// An IntegerLiteral is a constant expression made up of only integer basic literals and the "+" and "-" unary operators.
// That is, 0, -4, -+42 are all integer literals, but 1 + 2 is not.
type IntegerLiteral struct {
	Value Node
}
type BinaryExpr struct {
	X Node
	Op Node
	Y Node
}
type ForStmt struct {
	Init Node
	Cond Node
	Post Node
	Body Node
}
// Or matches if any of its sub-patterns matches.
type Or struct {
	Nodes []Node
}
// Not matches if its sub-pattern does not match.
type Not struct {
	Node Node
}
// A TrulyConstantExpression is a constant expression that does not make use of any identifiers.
// It is constant even under varying build tags.
type TrulyConstantExpression struct {
	Value Node
}
// stringify renders a pattern node as "(TypeName field1 field2 ...)"
// using reflection over the node's struct fields.
func stringify(n Node) string {
	v := reflect.ValueOf(n)
	parts := make([]string, 0, v.NumField()+1)
	parts = append(parts, v.Type().Name())
	for i := 0; i < v.NumField(); i++ {
		parts = append(parts, fmt.Sprintf("%s", v.Field(i)))
	}
	return "(" + strings.Join(parts, " ") + ")"
}
// The String methods below all delegate to stringify, which prints a
// node as an s-expression of its type name and fields.
func (stmt AssignStmt) String() string { return stringify(stmt) }
func (expr IndexExpr) String() string { return stringify(expr) }
func (expr IndexListExpr) String() string { return stringify(expr) }
func (id Ident) String() string { return stringify(id) }
func (spec ValueSpec) String() string { return stringify(spec) }
func (decl GenDecl) String() string { return stringify(decl) }
func (lit BasicLit) String() string { return stringify(lit) }
func (expr BinaryExpr) String() string { return stringify(expr) }
func (stmt ForStmt) String() string { return stringify(stmt) }
func (stmt RangeStmt) String() string { return stringify(stmt) }
func (typ ArrayType) String() string { return stringify(typ) }
func (stmt DeferStmt) String() string { return stringify(stmt) }
func (typ MapType) String() string { return stringify(typ) }
func (stmt ReturnStmt) String() string { return stringify(stmt) }
func (expr SliceExpr) String() string { return stringify(expr) }
func (expr StarExpr) String() string { return stringify(expr) }
func (expr UnaryExpr) String() string { return stringify(expr) }
func (stmt SendStmt) String() string { return stringify(stmt) }
func (spec ImportSpec) String() string { return stringify(spec) }
func (stmt SelectStmt) String() string { return stringify(stmt) }
func (stmt IfStmt) String() string { return stringify(stmt) }
func (stmt IncDecStmt) String() string { return stringify(stmt) }
func (stmt GoStmt) String() string { return stringify(stmt) }
func (field Field) String() string { return stringify(field) }
func (expr SelectorExpr) String() string { return stringify(expr) }
func (typ StructType) String() string { return stringify(typ) }
func (expr KeyValueExpr) String() string { return stringify(expr) }
func (typ FuncType) String() string { return stringify(typ) }
func (lit FuncLit) String() string { return stringify(lit) }
func (decl FuncDecl) String() string { return stringify(decl) }
func (stmt BranchStmt) String() string { return stringify(stmt) }
func (expr CallExpr) String() string { return stringify(expr) }
func (clause CaseClause) String() string { return stringify(clause) }
func (typ ChanType) String() string { return stringify(typ) }
func (clause CommClause) String() string { return stringify(clause) }
func (lit CompositeLit) String() string { return stringify(lit) }
func (stmt EmptyStmt) String() string { return stringify(stmt) }
func (typ InterfaceType) String() string { return stringify(typ) }
func (stmt SwitchStmt) String() string { return stringify(stmt) }
func (expr TypeAssertExpr) String() string { return stringify(expr) }
func (spec TypeSpec) String() string { return stringify(spec) }
func (stmt TypeSwitchStmt) String() string { return stringify(stmt) }
func (nil Nil) String() string { return "nil" }
func (builtin Builtin) String() string { return stringify(builtin) }
func (obj Object) String() string { return stringify(obj) }
func (fn Function) String() string { return stringify(fn) }
func (el Ellipsis) String() string { return stringify(el) }
func (not Not) String() string { return stringify(not) }
func (lit IntegerLiteral) String() string { return stringify(lit) }
func (expr TrulyConstantExpression) String() string { return stringify(expr) }
// String renders an Or as "(Or node1 node2 ...)".
func (or Or) String() string {
	var b strings.Builder
	b.WriteString("(Or")
	for _, node := range or.Nodes {
		b.WriteString(" ")
		b.WriteString(node.String())
	}
	b.WriteString(")")
	return b.String()
}
// isProperList reports whether l is a proper list, i.e. a chain of
// List cells terminated by the empty List.
func isProperList(l List) bool {
	for {
		if l.Head == nil && l.Tail == nil {
			// Reached the empty-list terminator.
			return true
		}
		next, ok := l.Tail.(List)
		if !ok {
			// Tail is nil or some non-List node: improper list.
			return false
		}
		l = next
	}
}
// String renders a proper list as "[a b c]" and an improper list as
// "head:tail".
func (l List) String() string {
	if l.Head == nil && l.Tail == nil {
		return "[]"
	}
	if !isProperList(l) {
		return fmt.Sprintf("%s:%s", l.Head, l.Tail)
	}
	// pretty-print the list
	var elems []string
	for cell := l; cell.Head != nil; cell = cell.Tail.(List) {
		elems = append(elems, cell.Head.String())
	}
	return fmt.Sprintf("[%s]", strings.Join(elems, " "))
}
// String renders a binding as "name" or "name@node".
func (bind Binding) String() string {
	if bind.Node != nil {
		return fmt.Sprintf("%s@%s", bind.Name, bind.Node)
	}
	return bind.Name
}
// String renders the literal in quoted form.
func (s String) String() string { return fmt.Sprintf("%q", string(s)) }
// String renders the token as its upper-cased, quoted go/token name.
func (tok Token) String() string {
	return fmt.Sprintf("%q", strings.ToUpper(token.Token(tok).String()))
}
// String renders the wildcard as "_", matching pattern syntax.
func (Any) String() string { return "_" }
// The empty isNode methods below mark each type as a pattern Node;
// they exist only to satisfy the Node interface.
func (AssignStmt) isNode() {}
func (IndexExpr) isNode() {}
func (IndexListExpr) isNode() {}
func (Ident) isNode() {}
func (ValueSpec) isNode() {}
func (GenDecl) isNode() {}
func (BasicLit) isNode() {}
func (BinaryExpr) isNode() {}
func (ForStmt) isNode() {}
func (RangeStmt) isNode() {}
func (ArrayType) isNode() {}
func (DeferStmt) isNode() {}
func (MapType) isNode() {}
func (ReturnStmt) isNode() {}
func (SliceExpr) isNode() {}
func (StarExpr) isNode() {}
func (UnaryExpr) isNode() {}
func (SendStmt) isNode() {}
func (ImportSpec) isNode() {}
func (SelectStmt) isNode() {}
func (IfStmt) isNode() {}
func (IncDecStmt) isNode() {}
func (GoStmt) isNode() {}
func (Field) isNode() {}
func (SelectorExpr) isNode() {}
func (StructType) isNode() {}
func (KeyValueExpr) isNode() {}
func (FuncType) isNode() {}
func (FuncLit) isNode() {}
func (FuncDecl) isNode() {}
func (BranchStmt) isNode() {}
func (CallExpr) isNode() {}
func (CaseClause) isNode() {}
func (ChanType) isNode() {}
func (CommClause) isNode() {}
func (CompositeLit) isNode() {}
func (EmptyStmt) isNode() {}
func (InterfaceType) isNode() {}
func (SwitchStmt) isNode() {}
func (TypeAssertExpr) isNode() {}
func (TypeSpec) isNode() {}
func (TypeSwitchStmt) isNode() {}
func (Nil) isNode() {}
func (Builtin) isNode() {}
func (Object) isNode() {}
func (Function) isNode() {}
func (Ellipsis) isNode() {}
func (Or) isNode() {}
func (List) isNode() {}
func (String) isNode() {}
func (Token) isNode() {}
func (Any) isNode() {}
func (Binding) isNode() {}
func (Not) isNode() {}
func (IntegerLiteral) isNode() {}
func (TrulyConstantExpression) isNode() {}

View File

@@ -0,0 +1,12 @@
//go:build gofuzz
// +build gofuzz
package printf
// Fuzz is the go-fuzz entry point. It reports 1 for inputs that parse
// as valid format strings (interesting corpus candidates) and 0
// otherwise.
func Fuzz(data []byte) int {
	if _, err := Parse(string(data)); err == nil {
		return 1
	}
	return 0
}

View File

@@ -0,0 +1,197 @@
// Package printf implements a parser for fmt.Printf-style format
// strings.
//
// It parses verbs according to the following syntax:
// Numeric -> '0'-'9'
// Letter -> 'a'-'z' | 'A'-'Z'
// Index -> '[' Numeric+ ']'
// Star -> '*'
// Star -> Index '*'
//
// Precision -> Numeric+ | Star
// Width -> Numeric+ | Star
//
// WidthAndPrecision -> Width '.' Precision
// WidthAndPrecision -> Width '.'
// WidthAndPrecision -> Width
// WidthAndPrecision -> '.' Precision
// WidthAndPrecision -> '.'
//
// Flag -> '+' | '-' | '#' | ' ' | '0'
// Verb -> Letter | '%'
//
// Input -> '%' [ Flag+ ] [ WidthAndPrecision ] [ Index ] Verb
package printf
import (
"errors"
"regexp"
"strconv"
"strings"
)
// ErrInvalid is returned for invalid format strings or verbs.
var ErrInvalid = errors.New("invalid format string")
// Verb describes a single parsed verb in a format string.
type Verb struct {
	// Letter is the verb character, e.g. 'd' in %d, or '%' for %%.
	Letter rune
	// Flags holds the verb's flag characters ('+', '-', '#', ' ', '0').
	Flags string
	Width Argument
	Precision Argument
	// Which value in the argument list the verb uses.
	// -1 denotes the next argument,
	// values > 0 denote explicit arguments.
	// The value 0 denotes that no argument is consumed. This is the case for %%.
	Value int
	// Raw is the verb exactly as it appeared in the format string.
	Raw string
}
// Argument is an implicit or explicit width or precision.
type Argument interface {
	isArgument()
}
// The Default value, when no width or precision is provided.
type Default struct{}
// Zero is the implicit zero value.
// This value may only appear for precisions in format strings like %6.f
type Zero struct{}
// Star is a * value, which may either refer to the next argument (Index == -1) or an explicit argument.
type Star struct{ Index int }
// A Literal value, such as 6 in %6d.
type Literal int
func (Default) isArgument() {}
func (Zero) isArgument() {}
func (Star) isArgument() {}
func (Literal) isArgument() {}
// Parse parses f and returns a list of actions.
// An action may either be a literal string, or a Verb.
func Parse(f string) ([]interface{}, error) {
var out []interface{}
for len(f) > 0 {
if f[0] == '%' {
v, n, err := ParseVerb(f)
if err != nil {
return nil, err
}
f = f[n:]
out = append(out, v)
} else {
n := strings.IndexByte(f, '%')
if n > -1 {
out = append(out, f[:n])
f = f[n:]
} else {
out = append(out, f)
f = ""
}
}
}
return out, nil
}
// atoi converts s to an int, discarding the conversion error. It is
// only called on strings already validated by the verb regexp.
func atoi(s string) int {
	value, _ := strconv.Atoi(s)
	return value
}
// ParseVerb parses the verb at the beginning of f.
// It returns the verb, how much of the input was consumed, and an error, if any.
func ParseVerb(f string) (Verb, int, error) {
	if len(f) < 2 {
		return Verb{}, 0, ErrInvalid
	}
	// These constants are the capture-group indices of the package-level
	// regexp re; they must stay in sync with its definition.
	const (
		flags = 1
		width = 2
		widthStar = 3
		widthIndex = 5
		dot = 6
		prec = 7
		precStar = 8
		precIndex = 10
		verbIndex = 11
		verb = 12
	)
	m := re.FindStringSubmatch(f)
	if m == nil {
		return Verb{}, 0, ErrInvalid
	}
	v := Verb{
		Letter: []rune(m[verb])[0],
		Flags: m[flags],
		Raw: m[0],
	}
	if m[width] != "" {
		// Literal width
		v.Width = Literal(atoi(m[width]))
	} else if m[widthStar] != "" {
		// Star width
		if m[widthIndex] != "" {
			v.Width = Star{atoi(m[widthIndex])}
		} else {
			v.Width = Star{-1}
		}
	} else {
		// Default width
		v.Width = Default{}
	}
	if m[dot] == "" {
		// default precision
		v.Precision = Default{}
	} else {
		if m[prec] != "" {
			// Literal precision
			v.Precision = Literal(atoi(m[prec]))
		} else if m[precStar] != "" {
			// Star precision
			if m[precIndex] != "" {
				v.Precision = Star{atoi(m[precIndex])}
			} else {
				v.Precision = Star{-1}
			}
		} else {
			// Zero precision, e.g. the %6.f form.
			v.Precision = Zero{}
		}
	}
	if m[verb] == "%" {
		// %% consumes no argument.
		v.Value = 0
	} else if m[verbIndex] != "" {
		// Explicit argument index, e.g. %[2]d.
		v.Value = atoi(m[verbIndex])
	} else {
		// Implicitly uses the next argument.
		v.Value = -1
	}
	return v, len(m[0]), nil
}
// The sub-expressions below assemble the verb-matching regexp re.
// Their capture groups are consumed by index in ParseVerb, so any
// change to the grouping must be mirrored there.
const (
	flags = `([+#0 -]*)`
	verb = `([a-zA-Z%])`
	index = `(?:\[([0-9]+)\])`
	star = `((` + index + `)?\*)`
	width1 = `([0-9]+)`
	width2 = star
	width = `(?:` + width1 + `|` + width2 + `)`
	precision = width
	widthAndPrecision = `(?:(?:` + width + `)?(?:(\.)(?:` + precision + `)?)?)`
)
// re matches a single verb anchored at the start of the input.
var re = regexp.MustCompile(`^%` + flags + widthAndPrecision + `?` + index + `?` + verb)

Some files were not shown because too many files have changed in this diff Show More