staticcheck (#313)

* CI: use staticcheck for linting

This commit switches the linter for Go code from golint to staticcheck.
Golint has been deprecated since last year and staticcheck is a
recommended replacement.

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>

* revendor

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>

* cmd,pkg: fix lint warnings

Signed-off-by: Lucas Servén Marín <lserven@gmail.com>
This commit is contained in:
Lucas Servén Marín
2022-05-19 19:45:43 +02:00
committed by GitHub
parent 93f46e03ea
commit 50fbc2eec2
227 changed files with 55458 additions and 2689 deletions

View File

@@ -0,0 +1,10 @@
This package is a copy of cmd/go/internal/cache.
Differences from upstream:
- we continue to use renameio instead of lockedfile for writing trim.txt
- we still use I/O helpers that work with earlier versions of Go.
- we use a cache directory specific to Staticcheck
- we use a Staticcheck-specific salt
The last upstream commit we've looked at was:
06ac303f6a14b133254f757e54599c48e3c2a4ad

View File

@@ -0,0 +1,533 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cache implements a build artifact cache.
//
// This package is a slightly modified fork of Go's
// cmd/go/internal/cache package.
package cache
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"honnef.co/go/tools/internal/renameio"
)
// An ActionID is a cache action key, the hash of a complete description of a
// repeatable computation (command line, environment variables,
// input file contents, executable contents).
type ActionID [HashSize]byte

// An OutputID is a cache output key, the hash of an output of a computation.
type OutputID [HashSize]byte

// A Cache is a package cache, backed by a file system directory tree.
type Cache struct {
	dir  string          // root directory containing the 256 fan-out subdirectories
	now  func() time.Time // clock; replaceable for testing
	salt []byte          // mixed into every NewHash; see SetSalt
}
// Open opens and returns the cache in the given directory.
//
// It is safe for multiple processes on a single machine to use the
// same cache directory in a local file system simultaneously.
// They will coordinate using operating system file locks and may
// duplicate effort but will not corrupt the cache.
//
// However, it is NOT safe for multiple processes on different machines
// to share a cache directory (for example, if the directory were stored
// in a network file system). File locking is notoriously unreliable in
// network file systems and may not suffice to protect the cache.
func Open(dir string) (*Cache, error) {
	info, err := os.Stat(dir)
	if err != nil {
		return nil, err
	}
	if !info.IsDir() {
		// The message has no format verbs, so errors.New suffices;
		// fmt.Errorf here is the kind of thing staticcheck flags.
		return nil, &os.PathError{Op: "open", Path: dir, Err: errors.New("not a directory")}
	}
	// Pre-create the 256 two-hex-digit fan-out subdirectories so that
	// later cache writes never have to create directories.
	for i := 0; i < 256; i++ {
		name := filepath.Join(dir, fmt.Sprintf("%02x", i))
		if err := os.MkdirAll(name, 0777); err != nil {
			return nil, err
		}
	}
	c := &Cache{
		dir: dir,
		now: time.Now,
	}
	return c, nil
}
// SetSalt sets the salt that NewHash mixes into every hash,
// isolating this cache's keys from other tools' caches.
func (c *Cache) SetSalt(b []byte) {
	c.salt = b
}
// fileName returns the name of the file corresponding to the given id,
// placed in the fan-out subdirectory named by the id's first byte.
func (c *Cache) fileName(id [HashSize]byte, key string) string {
	subdir := fmt.Sprintf("%02x", id[0])
	base := fmt.Sprintf("%x-%s", id, key)
	return filepath.Join(c.dir, subdir, base)
}
// An entryNotFoundError indicates that a cache entry was not found, with an
// optional underlying reason.
type entryNotFoundError struct {
	Err error
}

// Error reports the miss, including the underlying reason when one is set.
func (e *entryNotFoundError) Error() string {
	if e.Err != nil {
		return "cache entry not found: " + e.Err.Error()
	}
	return "cache entry not found"
}

// Unwrap exposes the underlying reason to errors.Is / errors.As.
func (e *entryNotFoundError) Unwrap() error {
	return e.Err
}
const (
	// action entry file is "v1 <hex id> <hex out> <decimal size space-padded to 20 bytes> <unixnano space-padded to 20 bytes>\n"
	hexSize   = HashSize * 2
	entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1
)

// verify controls whether to run the cache in verify mode.
// In verify mode, the cache always returns errMissing from Get
// but then double-checks in Put that the data being written
// exactly matches any existing entry. This provides an easy
// way to detect program behavior that would have been different
// had the cache entry been returned from Get.
//
// verify is enabled by setting the environment variable
// GODEBUG=gocacheverify=1.
var verify = false

// errVerifyMode is the reason attached to every Get miss in verify mode.
var errVerifyMode = errors.New("gocacheverify=1")

// DebugTest is set when GODEBUG=gocachetest=1 is in the environment.
var DebugTest = false
func init() { initEnv() }

// initEnv resets the debug flags and re-reads them from GODEBUG.
func initEnv() {
	verify = false
	debugHash = false
	for _, opt := range strings.Split(os.Getenv("GODEBUG"), ",") {
		switch opt {
		case "gocacheverify=1":
			verify = true
		case "gocachehash=1":
			debugHash = true
		case "gocachetest=1":
			DebugTest = true
		}
	}
}
// Get looks up the action ID in the cache,
// returning the corresponding output ID and file size, if any.
// Note that finding an output ID does not guarantee that the
// saved file for that output ID is still available.
func (c *Cache) Get(id ActionID) (Entry, error) {
	if verify {
		// In verify mode every Get misses so that Put can re-check
		// that recomputed outputs match what was cached.
		return Entry{}, &entryNotFoundError{Err: errVerifyMode}
	}
	return c.get(id)
}

// An Entry is the result of a cache hit: the output the action maps to,
// that output's size in bytes, and the time the index entry was written.
type Entry struct {
	OutputID OutputID
	Size     int64
	Time     time.Time
}
// get is Get but does not respect verify mode, so that Put can use it.
func (c *Cache) get(id ActionID) (Entry, error) {
	missing := func(reason error) (Entry, error) {
		return Entry{}, &entryNotFoundError{Err: reason}
	}
	f, err := os.Open(c.fileName(id, "a"))
	if err != nil {
		return missing(err)
	}
	defer f.Close()
	entry := make([]byte, entrySize+1) // +1 to detect whether f is too long
	// A well-formed entry file is exactly entrySize bytes, so reading
	// into the entrySize+1 buffer must stop with ErrUnexpectedEOF.
	if n, err := io.ReadFull(f, entry); n > entrySize {
		return missing(errors.New("too long"))
	} else if err != io.ErrUnexpectedEOF {
		if err == io.EOF {
			return missing(errors.New("file is empty"))
		}
		return missing(err)
	} else if n < entrySize {
		return missing(errors.New("entry file incomplete"))
	}
	// Validate the fixed layout:
	// "v1 <hex id> <hex out> <20-byte size> <20-byte unixnano>\n".
	if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' {
		return missing(errors.New("invalid header"))
	}
	// Slice the line into its fields, consuming entry as we go.
	eid, entry := entry[3:3+hexSize], entry[3+hexSize:]
	eout, entry := entry[1:1+hexSize], entry[1+hexSize:]
	esize, entry := entry[1:1+20], entry[1+20:]
	//lint:ignore SA4006 See https://github.com/dominikh/go-tools/issues/465
	etime, entry := entry[1:1+20], entry[1+20:]
	var buf [HashSize]byte
	if _, err := hex.Decode(buf[:], eid); err != nil {
		return missing(fmt.Errorf("decoding ID: %v", err))
	} else if buf != id {
		return missing(errors.New("mismatched ID"))
	}
	// buf now holds the decoded output ID (overwriting the action ID).
	if _, err := hex.Decode(buf[:], eout); err != nil {
		return missing(fmt.Errorf("decoding output ID: %v", err))
	}
	// Size and timestamp are space-padded to 20 bytes; skip the padding
	// before parsing the decimal digits.
	i := 0
	for i < len(esize) && esize[i] == ' ' {
		i++
	}
	size, err := strconv.ParseInt(string(esize[i:]), 10, 64)
	if err != nil {
		return missing(fmt.Errorf("parsing size: %v", err))
	} else if size < 0 {
		return missing(errors.New("negative size"))
	}
	i = 0
	for i < len(etime) && etime[i] == ' ' {
		i++
	}
	tm, err := strconv.ParseInt(string(etime[i:]), 10, 64)
	if err != nil {
		return missing(fmt.Errorf("parsing timestamp: %v", err))
	} else if tm < 0 {
		return missing(errors.New("negative timestamp"))
	}
	// Best-effort mtime bump so that Trim sees this entry as recently used.
	c.used(c.fileName(id, "a"))
	return Entry{buf, size, time.Unix(0, tm)}, nil
}
// GetFile looks up the action ID in the cache and returns
// the name of the corresponding data file.
func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) {
	entry, err = c.Get(id)
	if err != nil {
		return "", Entry{}, err
	}
	file = c.OutputFile(entry.OutputID)

	// The data file must exist and match the recorded size; otherwise
	// treat the lookup as a miss.
	fi, statErr := os.Stat(file)
	if statErr != nil {
		return "", Entry{}, &entryNotFoundError{Err: statErr}
	}
	if fi.Size() != entry.Size {
		return "", Entry{}, &entryNotFoundError{Err: errors.New("file incomplete")}
	}
	return file, entry, nil
}
// GetBytes looks up the action ID in the cache and returns
// the corresponding output bytes.
// GetBytes should only be used for data that can be expected to fit in memory.
func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) {
	entry, err := c.Get(id)
	if err != nil {
		return nil, entry, err
	}
	// The read error is deliberately ignored: a missing or partial read
	// is caught by the checksum comparison below.
	// ioutil is kept (rather than os.ReadFile) to support older Go versions.
	data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID))
	if sha256.Sum256(data) != entry.OutputID {
		return nil, entry, &entryNotFoundError{Err: errors.New("bad checksum")}
	}
	return data, entry, nil
}
// OutputFile returns the name of the cache file storing output with the given OutputID.
func (c *Cache) OutputFile(out OutputID) string {
	name := c.fileName(out, "d")
	// Record the access so trimming treats the file as recently used.
	c.used(name)
	return name
}
// Time constants for cache expiration.
//
// We set the mtime on a cache file on each use, but at most one per mtimeInterval (1 hour),
// to avoid causing many unnecessary inode updates. The mtimes therefore
// roughly reflect "time of last use" but may in fact be older by at most an hour.
//
// We scan the cache for entries to delete at most once per trimInterval (1 day).
//
// When we do scan the cache, we delete entries that have not been used for
// at least trimLimit (5 days). Statistics gathered from a month of usage by
// Go developers found that essentially all reuse of cached entries happened
// within 5 days of the previous reuse. See golang.org/issue/22990.
const (
	mtimeInterval = 1 * time.Hour
	trimInterval  = 24 * time.Hour
	trimLimit     = 5 * 24 * time.Hour
)
// used makes a best-effort attempt to update mtime on file,
// so that mtime reflects cache access time.
//
// Because the reflection only needs to be approximate,
// and to reduce the amount of disk activity caused by using
// cache entries, used only updates the mtime if the current
// mtime is more than an hour old. This heuristic eliminates
// nearly all of the mtime updates that would otherwise happen,
// while still keeping the mtimes useful for cache trimming.
func (c *Cache) used(file string) {
	fi, err := os.Stat(file)
	if err == nil && c.now().Sub(fi.ModTime()) < mtimeInterval {
		// Fresh enough; skip the inode update.
		return
	}
	// Errors are ignored: this is strictly best-effort bookkeeping.
	os.Chtimes(file, c.now(), c.now())
}
// Trim removes old cache entries that are likely not to be reused.
func (c *Cache) Trim() {
	now := c.now()

	// We maintain in dir/trim.txt the time of the last completed cache trim.
	// If the cache has been trimmed recently enough, do nothing.
	// This is the common case.
	// renameio is used (rather than plain file I/O) so trim.txt is
	// written atomically; a parse failure below simply forces a trim.
	data, _ := renameio.ReadFile(filepath.Join(c.dir, "trim.txt"))
	t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
	if err == nil && now.Sub(time.Unix(t, 0)) < trimInterval {
		return
	}

	// Trim each of the 256 subdirectories.
	// We subtract an additional mtimeInterval
	// to account for the imprecision of our "last used" mtimes.
	cutoff := now.Add(-trimLimit - mtimeInterval)
	for i := 0; i < 256; i++ {
		subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i))
		c.trimSubdir(subdir, cutoff)
	}

	// Ignore errors from here: if we don't write the complete timestamp, the
	// cache will appear older than it is, and we'll trim it again next time.
	renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666)
}
// trimSubdir trims a single cache subdirectory, removing cache entry
// files whose mtime is older than cutoff.
func (c *Cache) trimSubdir(subdir string, cutoff time.Time) {
	// Read all directory entries from subdir before removing
	// any files, in case removing files invalidates the file offset
	// in the directory scan. Also, ignore error from f.Readdirnames,
	// because we don't care about reporting the error and we still
	// want to process any entries found before the error.
	dir, err := os.Open(subdir)
	if err != nil {
		return
	}
	names, _ := dir.Readdirnames(-1)
	dir.Close()

	for _, name := range names {
		// Remove only cache entries (xxxx-a and xxxx-d).
		isEntry := strings.HasSuffix(name, "-a") || strings.HasSuffix(name, "-d")
		if !isEntry {
			continue
		}
		path := filepath.Join(subdir, name)
		if fi, err := os.Stat(path); err == nil && fi.ModTime().Before(cutoff) {
			os.Remove(path)
		}
	}
}
// putIndexEntry adds an entry to the cache recording that executing the action
// with the given id produces an output with the given output id (hash) and size.
func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error {
	// Note: We expect that for one reason or another it may happen
	// that repeating an action produces a different output hash
	// (for example, if the output contains a time stamp or temp dir name).
	// While not ideal, this is also not a correctness problem, so we
	// don't make a big deal about it. In particular, we leave the action
	// cache entries writable specifically so that they can be overwritten.
	//
	// Setting GODEBUG=gocacheverify=1 does make a big deal:
	// in verify mode we are double-checking that the cache entries
	// are entirely reproducible. As just noted, this may be unrealistic
	// in some cases but the check is also useful for shaking out real bugs.
	//
	// This line layout must match the parser in get: the size and
	// timestamp fields are space-padded to 20 bytes each.
	entry := fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano())
	if verify && allowVerify {
		old, err := c.get(id)
		if err == nil && (old.OutputID != out || old.Size != size) {
			// panic to show stack trace, so we can see what code is generating this cache entry.
			msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), out, size, old.OutputID, old.Size)
			panic(msg)
		}
	}
	file := c.fileName(id, "a")

	// Copy file to cache directory.
	mode := os.O_WRONLY | os.O_CREATE
	f, err := os.OpenFile(file, mode, 0666)
	if err != nil {
		return err
	}
	_, err = f.WriteString(entry)
	if err == nil {
		// Truncate the file only *after* writing it.
		// (This should be a no-op, but truncate just in case of previous corruption.)
		//
		// This differs from ioutil.WriteFile, which truncates to 0 *before* writing
		// via os.O_TRUNC. Truncating only after writing ensures that a second write
		// of the same content to the same file is idempotent, and does not — even
		// temporarily! — undo the effect of the first write.
		err = f.Truncate(int64(len(entry)))
	}
	if closeErr := f.Close(); err == nil {
		err = closeErr
	}
	if err != nil {
		// TODO(bcmills): This Remove potentially races with another go command writing to file.
		// Can we eliminate it?
		os.Remove(file)
		return err
	}
	os.Chtimes(file, c.now(), c.now()) // mainly for tests
	return nil
}
// Put stores the given output in the cache as the output for the action ID.
// It may read file twice. The content of file must not change between the two passes.
func (c *Cache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
	return c.put(id, file, true)
}

// PutNoVerify is like Put but disables the verify check
// when GODEBUG=gocacheverify=1 is set.
// It is meant for data that is OK to cache but that we expect to vary slightly from run to run,
// like test output containing times and the like.
func (c *Cache) PutNoVerify(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
	return c.put(id, file, false)
}
// put hashes file, stores its contents under the resulting output ID,
// and records the action -> output mapping in the index.
func (c *Cache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) {
	// First pass: hash the file to compute its output ID.
	hasher := sha256.New()
	if _, err := file.Seek(0, io.SeekStart); err != nil {
		return OutputID{}, 0, err
	}
	size, err := io.Copy(hasher, file)
	if err != nil {
		return OutputID{}, 0, err
	}
	var outID OutputID
	hasher.Sum(outID[:0])

	// Second pass (inside copyFile): store the data file if it is not
	// already present, then add the index entry.
	if err := c.copyFile(file, outID, size); err != nil {
		return outID, size, err
	}
	return outID, size, c.putIndexEntry(id, outID, size, allowVerify)
}
// PutBytes stores the given bytes in the cache as the output for the action ID.
func (c *Cache) PutBytes(id ActionID, data []byte) error {
	r := bytes.NewReader(data)
	_, _, err := c.Put(id, r)
	return err
}
// copyFile copies file into the cache, expecting it to have the given
// output ID and size, if that file is not present already.
func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error {
	name := c.fileName(out, "d")
	info, err := os.Stat(name)
	if err == nil && info.Size() == size {
		// Check hash.
		if f, err := os.Open(name); err == nil {
			h := sha256.New()
			io.Copy(h, f)
			f.Close()
			var out2 OutputID
			h.Sum(out2[:0])
			if out == out2 {
				// Existing file already has the right contents.
				return nil
			}
		}
		// Hash did not match. Fall through and rewrite file.
	}

	// Copy file to cache directory.
	mode := os.O_RDWR | os.O_CREATE
	if err == nil && info.Size() > size { // shouldn't happen but fix in case
		mode |= os.O_TRUNC
	}
	f, err := os.OpenFile(name, mode, 0666)
	if err != nil {
		return err
	}
	defer f.Close()
	if size == 0 {
		// File now exists with correct size.
		// Only one possible zero-length file, so contents are OK too.
		// Early return here makes sure there's a "last byte" for code below.
		return nil
	}

	// From here on, if any of the I/O writing the file fails,
	// we make a best-effort attempt to truncate the file f
	// before returning, to avoid leaving bad bytes in the file.

	// Copy file to f, but also into h to double-check hash.
	if _, err := file.Seek(0, 0); err != nil {
		f.Truncate(0)
		return err
	}
	h := sha256.New()
	w := io.MultiWriter(f, h)
	// Write all but the final byte; the last byte is held back until the
	// hash has been confirmed below.
	if _, err := io.CopyN(w, file, size-1); err != nil {
		f.Truncate(0)
		return err
	}
	// Check last byte before writing it; writing it will make the size match
	// what other processes expect to find and might cause them to start
	// using the file.
	buf := make([]byte, 1)
	if _, err := file.Read(buf); err != nil {
		f.Truncate(0)
		return err
	}
	h.Write(buf)
	sum := h.Sum(nil)
	if !bytes.Equal(sum, out[:]) {
		f.Truncate(0)
		return fmt.Errorf("file content changed underfoot")
	}

	// Commit cache file entry.
	if _, err := f.Write(buf); err != nil {
		f.Truncate(0)
		return err
	}
	if err := f.Close(); err != nil {
		// Data might not have been written,
		// but file may look like it is the right size.
		// To be extra careful, remove cached file.
		os.Remove(name)
		return err
	}
	os.Chtimes(name, c.now(), c.now()) // mainly for tests
	return nil
}

View File

@@ -0,0 +1,85 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"sync"
)
// Default returns the default cache to use.
// The cache is created on first call; subsequent calls return the same
// *Cache along with any directory-resolution error from DefaultDir.
func Default() (*Cache, error) {
	defaultOnce.Do(initDefaultCache)
	return defaultCache, defaultDirErr
}

var (
	defaultOnce  sync.Once
	defaultCache *Cache
)

// cacheREADME is a message stored in a README in the cache directory.
// Because the cache lives outside the normal Go trees, we leave the
// README as a courtesy to explain where it came from.
const cacheREADME = `This directory holds cached build artifacts from staticcheck.
`
// initDefaultCache does the work of finding the default cache
// the first time Default is called.
// It exits the process (log.Fatalf) if the cache directory cannot be
// created or opened.
func initDefaultCache() {
	dir := DefaultDir()
	if err := os.MkdirAll(dir, 0777); err != nil {
		log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
	}
	if _, err := os.Stat(filepath.Join(dir, "README")); err != nil {
		// Best effort.
		ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666)
	}
	c, err := Open(dir)
	if err != nil {
		log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
	}
	defaultCache = c
}
var (
	defaultDirOnce sync.Once
	defaultDir     string
	defaultDirErr  error
)

// DefaultDir returns the effective STATICCHECK_CACHE setting.
func DefaultDir() string {
	// Save the result of the first call to DefaultDir for later use in
	// initDefaultCache. cmd/go/main.go explicitly sets GOCACHE so that
	// subprocesses will inherit it, but that means initDefaultCache can't
	// otherwise distinguish between an explicit "off" and a UserCacheDir error.
	defaultDirOnce.Do(func() {
		switch dir := os.Getenv("STATICCHECK_CACHE"); {
		case filepath.IsAbs(dir):
			defaultDir = dir
		case dir != "":
			// A relative path is rejected, but still returned so the
			// caller sees what was configured.
			defaultDir = dir
			defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not an absolute path")
		default:
			// Compute default location.
			base, err := os.UserCacheDir()
			if err != nil {
				defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not defined and %v", err)
				return
			}
			defaultDir = filepath.Join(base, "staticcheck")
		}
	})
	return defaultDir
}

View File

@@ -0,0 +1,163 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cache
import (
"bytes"
"crypto/sha256"
"fmt"
"hash"
"io"
"os"
"sync"
)
// debugHash enables tracing of every hash input/output to stderr.
var debugHash = false // set when GODEBUG=gocachehash=1

// HashSize is the number of bytes in a hash.
const HashSize = 32

// A Hash provides access to the canonical hash function used to index the cache.
// The current implementation uses salted SHA256, but clients must not assume this.
type Hash struct {
	h    hash.Hash
	name string        // for debugging
	buf  *bytes.Buffer // for verify
}
// Subkey returns an action ID corresponding to mixing a parent
// action ID with a string description of the subkey.
func Subkey(parent ActionID, desc string) ActionID {
	hasher := sha256.New()
	io.WriteString(hasher, "subkey:")
	hasher.Write(parent[:])
	io.WriteString(hasher, desc)
	var sub ActionID
	hasher.Sum(sub[:0])
	if debugHash {
		fmt.Fprintf(os.Stderr, "HASH subkey %x %q = %x\n", parent, desc, sub)
	}
	if verify {
		// Record the inputs so reverseHash can explain a mismatch.
		hashDebug.Lock()
		defer hashDebug.Unlock()
		hashDebug.m[sub] = fmt.Sprintf("subkey %x %q", parent, desc)
	}
	return sub
}
// NewHash returns a new Hash.
// The caller is expected to Write data to it and then call Sum.
func (c *Cache) NewHash(name string) *Hash {
	h := &Hash{h: sha256.New(), name: name}
	if debugHash {
		fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name)
	}
	// Mix the cache's salt into the hash before any caller data.
	h.Write(c.salt)
	if verify {
		// Note: buf is created after the salt write, so the salt is not
		// recorded in the verify-mode input log.
		h.buf = new(bytes.Buffer)
	}
	return h
}
// Write writes data to the running hash.
func (h *Hash) Write(b []byte) (int, error) {
	if debugHash {
		fmt.Fprintf(os.Stderr, "HASH[%s]: %q\n", h.name, b)
	}
	if h.buf != nil {
		// Verify mode: also record the raw input for later inspection.
		h.buf.Write(b)
	}
	return h.h.Write(b)
}
// Sum returns the hash of the data written previously.
func (h *Hash) Sum() [HashSize]byte {
	var out [HashSize]byte
	h.h.Sum(out[:0])
	if debugHash {
		fmt.Fprintf(os.Stderr, "HASH[%s]: %x\n", h.name, out)
	}
	if h.buf != nil {
		// Verify mode: record the full input for this hash ID so that
		// reverseHash can map a mismatched ID back to its inputs.
		hashDebug.Lock()
		if hashDebug.m == nil {
			hashDebug.m = make(map[[HashSize]byte]string)
		}
		hashDebug.m[out] = h.buf.String()
		hashDebug.Unlock()
	}
	return out
}
// In GODEBUG=gocacheverify=1 mode,
// hashDebug holds the input to every computed hash ID,
// so that we can work backward from the ID involved in a
// cache entry mismatch to a description of what should be there.
var hashDebug struct {
	sync.Mutex
	m map[[HashSize]byte]string
}

// reverseHash returns the input used to compute the hash id.
// It returns "" if the id was not recorded (e.g. outside verify mode).
func reverseHash(id [HashSize]byte) string {
	hashDebug.Lock()
	s := hashDebug.m[id]
	hashDebug.Unlock()
	return s
}

// hashFileCache memoizes FileHash results per file path.
var hashFileCache struct {
	sync.Mutex
	m map[string][HashSize]byte
}
// FileHash returns the hash of the named file.
// It caches repeated lookups for a given file,
// and the cache entry for a file can be initialized
// using SetFileHash.
// The hash used by FileHash is not the same as
// the hash used by NewHash.
func FileHash(file string) ([HashSize]byte, error) {
	// Fast path: consult the in-memory cache under the lock.
	hashFileCache.Lock()
	out, ok := hashFileCache.m[file]
	hashFileCache.Unlock()
	if ok {
		return out, nil
	}

	// Slow path: hash the file contents. The lock is not held while
	// reading, so concurrent callers may duplicate this work; SetFileHash
	// below simply stores whichever result finishes last.
	h := sha256.New()
	f, err := os.Open(file)
	if err != nil {
		if debugHash {
			fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err)
		}
		return [HashSize]byte{}, err
	}
	_, err = io.Copy(h, f)
	f.Close()
	if err != nil {
		if debugHash {
			fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err)
		}
		return [HashSize]byte{}, err
	}
	h.Sum(out[:0])
	if debugHash {
		fmt.Fprintf(os.Stderr, "HASH %s: %x\n", file, out)
	}

	SetFileHash(file, out)
	return out, nil
}
// SetFileHash sets the hash returned by FileHash for file.
func SetFileHash(file string, sum [HashSize]byte) {
	hashFileCache.Lock()
	defer hashFileCache.Unlock()
	// Lazily allocate the map on first use.
	if hashFileCache.m == nil {
		hashFileCache.m = make(map[string][HashSize]byte)
	}
	hashFileCache.m[file] = sum
}

742
vendor/honnef.co/go/tools/lintcmd/cmd.go vendored Normal file
View File

@@ -0,0 +1,742 @@
// Package lintcmd implements the frontend of an analysis runner.
// It serves as the entry-point for the staticcheck command, and can also be used to implement custom linters that behave like staticcheck.
package lintcmd
import (
"bufio"
"encoding/gob"
"flag"
"fmt"
"go/token"
"io"
"log"
"os"
"path/filepath"
"reflect"
"runtime"
"runtime/pprof"
"runtime/trace"
"sort"
"strings"
"sync"
"time"
"honnef.co/go/tools/analysis/lint"
"honnef.co/go/tools/config"
"honnef.co/go/tools/go/loader"
"honnef.co/go/tools/lintcmd/version"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/buildutil"
)
// A BuildConfig is a named build configuration read from the -matrix
// build matrix: environment variables and build flags — presumably
// applied when loading packages for that configuration (confirm against
// parseBuildConfigs and the package loader).
type BuildConfig struct {
	Name  string
	Envs  []string
	Flags []string
}
// Command represents a linter command line tool.
type Command struct {
	name      string
	analyzers map[string]*lint.Analyzer

	// Human- and machine-readable versions; see SetVersion.
	version        string
	machineVersion string

	// flags holds the registered flag set and the parsed flag values;
	// see initFlagSet and ParseFlags.
	flags struct {
		fs *flag.FlagSet

		tags        string
		tests       bool
		showIgnored bool
		formatter   string

		// mutually exclusive mode flags
		explain      string
		printVersion bool
		listChecks   bool
		merge        bool

		matrix bool

		debugCpuprofile       string
		debugMemprofile       string
		debugVersion          bool
		debugNoCompileErrors  bool
		debugMeasureAnalyzers string
		debugTrace            string

		checks    list
		fail      list
		goVersion versionFlag
	}
}
// NewCommand returns a new Command.
// Both version strings default to "devel"; see SetVersion.
func NewCommand(name string) *Command {
	cmd := &Command{
		name:           name,
		analyzers:      map[string]*lint.Analyzer{},
		version:        "devel",
		machineVersion: "devel",
	}
	cmd.initFlagSet(name)
	return cmd
}
// SetVersion sets the command's version.
// It is divided into a human part and a machine part.
// For example, Staticcheck 2020.2.1 had the human version "2020.2.1" and the machine version "v0.1.1".
// If you only use Semver, you can set both parts to the same value.
//
// Calling this method is optional. Both versions default to "devel", and we'll attempt to deduce more version information from the Go module.
func (cmd *Command) SetVersion(human, machine string) {
	cmd.version = human
	cmd.machineVersion = machine
}

// FlagSet returns the command's flag set.
// This can be used to add additional command line arguments.
func (cmd *Command) FlagSet() *flag.FlagSet {
	return cmd.flags.fs
}
// AddAnalyzers adds analyzers to the command.
// These are lint.Analyzer analyzers, which wrap analysis.Analyzer analyzers, bundling them with structured documentation.
//
// To add analysis.Analyzer analyzers without providing structured documentation, use AddBareAnalyzers.
func (cmd *Command) AddAnalyzers(as ...*lint.Analyzer) {
	for _, a := range as {
		cmd.analyzers[a.Analyzer.Name] = a
	}
}

// AddBareAnalyzers adds bare analyzers to the command.
// Each analyzer's documentation is synthesized from its Doc string.
func (cmd *Command) AddBareAnalyzers(as ...*analysis.Analyzer) {
	for _, a := range as {
		var title, text string
		// Split the doc string at the first blank line: the first
		// paragraph becomes the title, the remainder the body text.
		if idx := strings.Index(a.Doc, "\n\n"); idx > -1 {
			title = a.Doc[:idx]
			text = a.Doc[idx+2:]
		}
		doc := &lint.Documentation{
			Title:    title,
			Text:     text,
			Severity: lint.SeverityWarning,
		}
		cmd.analyzers[a.Name] = &lint.Analyzer{
			Doc:      doc,
			Analyzer: a,
		}
	}
}
// initFlagSet registers all of the command's flags on a fresh flag set.
// The set uses flag.ExitOnError, so a parse error terminates the process.
func (cmd *Command) initFlagSet(name string) {
	flags := flag.NewFlagSet("", flag.ExitOnError)
	cmd.flags.fs = flags
	flags.Usage = usage(name, flags)

	flags.StringVar(&cmd.flags.tags, "tags", "", "List of `build tags`")
	flags.BoolVar(&cmd.flags.tests, "tests", true, "Include tests")
	flags.BoolVar(&cmd.flags.printVersion, "version", false, "Print version and exit")
	flags.BoolVar(&cmd.flags.showIgnored, "show-ignored", false, "Don't filter ignored diagnostics")
	flags.StringVar(&cmd.flags.formatter, "f", "text", "Output `format` (valid choices are 'stylish', 'text' and 'json')")
	flags.StringVar(&cmd.flags.explain, "explain", "", "Print description of `check`")
	flags.BoolVar(&cmd.flags.listChecks, "list-checks", false, "List all available checks")
	flags.BoolVar(&cmd.flags.merge, "merge", false, "Merge results of multiple Staticcheck runs")
	flags.BoolVar(&cmd.flags.matrix, "matrix", false, "Read a build config matrix from stdin")

	flags.StringVar(&cmd.flags.debugCpuprofile, "debug.cpuprofile", "", "Write CPU profile to `file`")
	flags.StringVar(&cmd.flags.debugMemprofile, "debug.memprofile", "", "Write memory profile to `file`")
	flags.BoolVar(&cmd.flags.debugVersion, "debug.version", false, "Print detailed version information about this program")
	flags.BoolVar(&cmd.flags.debugNoCompileErrors, "debug.no-compile-errors", false, "Don't print compile errors")
	flags.StringVar(&cmd.flags.debugMeasureAnalyzers, "debug.measure-analyzers", "", "Write analysis measurements to `file`. `file` will be opened for appending if it already exists.")
	flags.StringVar(&cmd.flags.debugTrace, "debug.trace", "", "Write trace to `file`")

	// These three use custom flag.Value types with non-zero defaults.
	cmd.flags.checks = list{"inherit"}
	cmd.flags.fail = list{"all"}
	cmd.flags.goVersion = versionFlag("module")
	flags.Var(&cmd.flags.checks, "checks", "Comma-separated list of `checks` to enable.")
	flags.Var(&cmd.flags.fail, "fail", "Comma-separated list of `checks` that can cause a non-zero exit status.")
	flags.Var(&cmd.flags.goVersion, "go", "Target Go `version` in the format '1.x', or the literal 'module' to use the module's Go version")
}
// A list is a comma-separated flag.Value holding a list of strings.
type list []string

// String renders the list as a quoted, comma-joined string.
func (l *list) String() string {
	return `"` + strings.Join(*l, ",") + `"`
}

// Set parses a comma-separated value, trimming whitespace around each
// element. An empty value resets the list to nil.
func (l *list) Set(s string) error {
	if s == "" {
		*l = nil
		return nil
	}
	parts := strings.Split(s, ",")
	for i := range parts {
		parts[i] = strings.TrimSpace(parts[i])
	}
	*l = parts
	return nil
}
// A versionFlag is a flag.Value for the -go flag: either the literal
// "module" or a Go version string accepted by lint.VersionFlag.
type versionFlag string

func (v *versionFlag) String() string {
	return fmt.Sprintf("%q", string(*v))
}

func (v *versionFlag) Set(s string) error {
	if s == "module" {
		*v = "module"
	} else {
		// Delegate validation to lint.VersionFlag; only store s if it
		// is a well-formed version.
		var vf lint.VersionFlag
		if err := vf.Set(s); err != nil {
			return err
		}
		*v = versionFlag(s)
	}
	return nil
}
// ParseFlags parses command line flags.
// It must be called before calling Run.
// After calling ParseFlags, the values of flags can be accessed.
//
// Example:
//
//	cmd.ParseFlags(os.Args[1:])
func (cmd *Command) ParseFlags(args []string) {
	// The flag set was created with flag.ExitOnError, so Parse exits the
	// process on error and its return value can safely be ignored.
	cmd.flags.fs.Parse(args)
}
// diagnosticDescriptor represents the uniquely identifying information of diagnostics.
// Two diagnostics with equal descriptors are considered the same diagnostic
// (used as a map key for deduplication).
type diagnosticDescriptor struct {
	Position token.Position
	End      token.Position
	Category string
	Message  string
}

// descriptor extracts the diagnostic's uniquely identifying information.
func (diag diagnostic) descriptor() diagnosticDescriptor {
	return diagnosticDescriptor{
		Position: diag.Position,
		End:      diag.End,
		Category: diag.Category,
		Message:  diag.Message,
	}
}
// A run holds the outcome of a single lint invocation: the set of files
// that were checked and its diagnostics, keyed by descriptor so that
// duplicates collapse.
type run struct {
	checkedFiles map[string]struct{}
	diagnostics  map[diagnosticDescriptor]diagnostic
}

// runFromLintResult converts a LintResult into a run, indexing the
// checked files and diagnostics into sets.
func runFromLintResult(res LintResult) run {
	out := run{
		checkedFiles: map[string]struct{}{},
		diagnostics:  map[diagnosticDescriptor]diagnostic{},
	}
	for _, cf := range res.CheckedFiles {
		out.checkedFiles[cf] = struct{}{}
	}
	for _, diag := range res.Diagnostics {
		out.diagnostics[diag.descriptor()] = diag
	}
	return out
}
// decodeGob decodes gob-encoded LintResult values from br until EOF,
// converting each into a run.
//
// A fresh gob decoder is created per value — presumably because each
// LintResult was written by its own encoder, so decoder type state must
// not carry over (confirm against the writer side).
func decodeGob(br io.ByteReader) ([]run, error) {
	var runs []run
	for {
		var res LintResult
		if err := gob.NewDecoder(br.(io.Reader)).Decode(&res); err != nil {
			if err == io.EOF {
				break
			} else {
				return nil, err
			}
		}
		runs = append(runs, runFromLintResult(res))
	}
	return runs, nil
}
// Run runs all registered analyzers and reports their findings.
// It always terminates the process (via cmd.exit) and does not return.
func (cmd *Command) Run() {
	// Optional per-analyzer timing log; every finished (analyzer, package)
	// pair appends one tab-separated line to the file.
	var measureAnalyzers func(analysis *analysis.Analyzer, pkg *loader.PackageSpec, d time.Duration)
	if path := cmd.flags.debugMeasureAnalyzers; path != "" {
		f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
		if err != nil {
			log.Fatal(err)
		}
		mu := &sync.Mutex{}
		measureAnalyzers = func(analysis *analysis.Analyzer, pkg *loader.PackageSpec, d time.Duration) {
			mu.Lock()
			defer mu.Unlock()
			// FIXME(dh): print pkg.ID
			if _, err := fmt.Fprintf(f, "%s\t%s\t%d\n", analysis.Name, pkg, d.Nanoseconds()); err != nil {
				log.Println("error writing analysis measurements:", err)
			}
		}
	}

	// Debug profiling and tracing. Both are stopped and flushed by cmd.exit,
	// which is why every exit path below must go through cmd.exit rather
	// than calling os.Exit directly.
	if path := cmd.flags.debugCpuprofile; path != "" {
		f, err := os.Create(path)
		if err != nil {
			log.Fatal(err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
	}
	if path := cmd.flags.debugTrace; path != "" {
		f, err := os.Create(path)
		if err != nil {
			log.Fatal(err)
		}
		if err := trace.Start(f); err != nil {
			log.Fatal(err)
		}
	}

	// All checks are enabled by default, minus those explicitly marked
	// as non-default.
	defaultChecks := []string{"all"}
	cs := make([]*lint.Analyzer, 0, len(cmd.analyzers))
	for _, a := range cmd.analyzers {
		cs = append(cs, a)
		if a.Doc.NonDefault {
			defaultChecks = append(defaultChecks, "-"+a.Analyzer.Name)
		}
	}
	config.DefaultConfig.Checks = defaultChecks

	switch {
	case cmd.flags.debugVersion:
		version.Verbose(cmd.version, cmd.machineVersion)
		cmd.exit(0)
	case cmd.flags.listChecks:
		sort.Slice(cs, func(i, j int) bool {
			return cs[i].Analyzer.Name < cs[j].Analyzer.Name
		})
		for _, c := range cs {
			var title string
			if c.Doc != nil {
				title = c.Doc.Title
			}
			fmt.Printf("%s %s\n", c.Analyzer.Name, title)
		}
		cmd.exit(0)
	case cmd.flags.printVersion:
		version.Print(cmd.version, cmd.machineVersion)
		cmd.exit(0)
	case cmd.flags.explain != "":
		explain := cmd.flags.explain
		check, ok := cmd.analyzers[explain]
		if !ok {
			fmt.Fprintln(os.Stderr, "Couldn't find check", explain)
			cmd.exit(1)
		}
		if check.Analyzer.Doc == "" {
			fmt.Fprintln(os.Stderr, explain, "has no documentation")
			cmd.exit(1)
		}
		fmt.Println(check.Doc)
		fmt.Println("Online documentation\n    https://staticcheck.io/docs/checks#" + check.Analyzer.Name)
		cmd.exit(0)
	case cmd.flags.merge:
		// Merge mode: read gob-encoded results either from stdin or from
		// the files named on the command line, then report them as one run.
		var runs []run
		if len(cmd.flags.fs.Args()) == 0 {
			var err error
			runs, err = decodeGob(bufio.NewReader(os.Stdin))
			if err != nil {
				fmt.Fprintln(os.Stderr, fmt.Errorf("couldn't parse stdin: %s", err))
				cmd.exit(1)
			}
		} else {
			for _, path := range cmd.flags.fs.Args() {
				someRuns, err := func(path string) ([]run, error) {
					f, err := os.Open(path)
					if err != nil {
						return nil, err
					}
					defer f.Close()
					br := bufio.NewReader(f)
					return decodeGob(br)
				}(path)
				if err != nil {
					fmt.Fprintln(os.Stderr, fmt.Errorf("couldn't parse file %s: %s", path, err))
					cmd.exit(1)
				}
				runs = append(runs, someRuns...)
			}
		}
		relevantDiagnostics := mergeRuns(runs)
		cmd.printDiagnostics(cs, relevantDiagnostics)
	default:
		switch cmd.flags.formatter {
		case "text", "stylish", "json", "sarif", "binary", "null":
		default:
			fmt.Fprintf(os.Stderr, "unsupported output format %q\n", cmd.flags.formatter)
			cmd.exit(2)
		}

		// Determine the build configurations to lint under: either a
		// matrix read from stdin, or a single configuration derived
		// from -tags.
		var bconfs []BuildConfig
		if cmd.flags.matrix {
			if cmd.flags.tags != "" {
				fmt.Fprintln(os.Stderr, "cannot use -matrix and -tags together")
				cmd.exit(2)
			}
			var err error
			bconfs, err = parseBuildConfigs(os.Stdin)
			if err != nil {
				if perr, ok := err.(parseBuildConfigError); ok {
					fmt.Fprintf(os.Stderr, "<stdin>:%d couldn't parse build matrix: %s\n", perr.line, perr.err)
				} else {
					fmt.Fprintln(os.Stderr, err)
				}
				// Use cmd.exit, not os.Exit, so that an active CPU
				// profile or trace gets stopped and flushed.
				cmd.exit(2)
			}
		} else {
			bc := BuildConfig{}
			if cmd.flags.tags != "" {
				// Validate that the tags argument is well-formed. go/packages
				// doesn't detect malformed build flags and returns unhelpful
				// errors.
				tf := buildutil.TagsFlag{}
				if err := tf.Set(cmd.flags.tags); err != nil {
					fmt.Fprintln(os.Stderr, fmt.Errorf("invalid value %q for flag -tags: %s", cmd.flags.tags, err))
					cmd.exit(1)
				}
				bc.Flags = []string{"-tags", cmd.flags.tags}
			}
			bconfs = append(bconfs, bc)
		}

		var runs []run
		for _, bconf := range bconfs {
			res, err := doLint(cs, cmd.flags.fs.Args(), &options{
				BuildConfig: bconf,
				LintTests:   cmd.flags.tests,
				GoVersion:   string(cmd.flags.goVersion),
				Config: config.Config{
					Checks: cmd.flags.checks,
				},
				PrintAnalyzerMeasurement: measureAnalyzers,
			})
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				cmd.exit(1)
			}

			for _, w := range res.Warnings {
				fmt.Fprintln(os.Stderr, "warning:", w)
			}

			cwd, err := os.Getwd()
			if err != nil {
				cwd = ""
			}
			// relPath makes a path relative to the working directory and
			// /-separated, for OS-independent output.
			relPath := func(s string) string {
				if cwd == "" {
					return filepath.ToSlash(s)
				}
				out, err := filepath.Rel(cwd, s)
				if err != nil {
					return filepath.ToSlash(s)
				}
				return filepath.ToSlash(out)
			}

			if cmd.flags.formatter == "binary" {
				for i, s := range res.CheckedFiles {
					res.CheckedFiles[i] = relPath(s)
				}
				for i := range res.Diagnostics {
					// We turn all paths into relative, /-separated paths. This is to make -merge work correctly when
					// merging runs from different OSs, with different absolute paths.
					//
					// We zero out Offset, because checkouts of code on different OSs may have different kinds of
					// newlines and thus different offsets. We don't ever make use of the Offset, anyway. Line and
					// column numbers are precomputed.
					d := &res.Diagnostics[i]
					d.Position.Filename = relPath(d.Position.Filename)
					d.Position.Offset = 0
					d.End.Filename = relPath(d.End.Filename)
					d.End.Offset = 0
					for j := range d.Related {
						r := &d.Related[j]
						r.Position.Filename = relPath(r.Position.Filename)
						r.Position.Offset = 0
						r.End.Filename = relPath(r.End.Filename)
						r.End.Offset = 0
					}
				}
				err := gob.NewEncoder(os.Stdout).Encode(res)
				if err != nil {
					fmt.Fprintf(os.Stderr, "failed writing output: %s\n", err)
					cmd.exit(2)
				}
			} else {
				runs = append(runs, runFromLintResult(res))
			}
		}

		if cmd.flags.formatter != "binary" {
			diags := mergeRuns(runs)
			cmd.printDiagnostics(cs, diags)
		}
	}
}
// mergeRuns combines the diagnostics of several runs (e.g. from multiple
// build configurations) into one list, honoring each diagnostic's merge
// strategy: MergeIfAny diagnostics are always kept, MergeIfAll diagnostics
// only when every run that checked the affected file also reported them.
func mergeRuns(runs []run) []diagnostic {
	// reportedEverywhere reports whether diag was emitted by every run
	// that checked the file the diagnostic refers to.
	reportedEverywhere := func(diag diagnostic) bool {
		desc := diag.descriptor()
		for _, r := range runs {
			if _, checked := r.checkedFiles[diag.Position.Filename]; !checked {
				continue
			}
			if _, found := r.diagnostics[desc]; !found {
				return false
			}
		}
		return true
	}

	var merged []diagnostic
	for _, r := range runs {
		for _, diag := range r.diagnostics {
			switch diag.MergeIf {
			case lint.MergeIfAny:
				merged = append(merged, diag)
			case lint.MergeIfAll:
				if reportedEverywhere(diag) {
					merged = append(merged, diag)
				}
			}
		}
	}
	return merged
}
// exit terminates the process with the given status code, flushing any
// active debug profiles and traces first. It is the required counterpart
// to the profiling started in Run.
func (cmd *Command) exit(code int) {
	if cmd.flags.debugCpuprofile != "" {
		pprof.StopCPUProfile()
	}
	if path := cmd.flags.debugMemprofile; path != "" {
		f, err := os.Create(path)
		if err != nil {
			panic(err)
		}
		// Run a GC first so the heap profile reflects live memory only.
		runtime.GC()
		pprof.WriteHeapProfile(f)
	}
	if cmd.flags.debugTrace != "" {
		trace.Stop()
	}
	os.Exit(code)
}
// printDiagnostics sorts, deduplicates, and prints diagnostics using the
// configured output format, then terminates the process via cmd.exit:
// exit code 1 if any printed diagnostic belongs to a category the run
// should fail on (the -fail set, plus "staticcheck" and "compile"),
// 0 otherwise. SARIF output always exits 0.
func (cmd *Command) printDiagnostics(cs []*lint.Analyzer, diagnostics []diagnostic) {
	if len(diagnostics) > 1 {
		// Sort into a deterministic order so duplicates (the same file can
		// be part of many packages and build configurations) end up adjacent
		// and can be collapsed in a single pass.
		sort.Slice(diagnostics, func(i, j int) bool {
			di := diagnostics[i]
			dj := diagnostics[j]
			pi := di.Position
			pj := dj.Position
			if pi.Filename != pj.Filename {
				return pi.Filename < pj.Filename
			}
			if pi.Line != pj.Line {
				return pi.Line < pj.Line
			}
			if pi.Column != pj.Column {
				return pi.Column < pj.Column
			}
			if di.Message != dj.Message {
				return di.Message < dj.Message
			}
			if di.BuildName != dj.BuildName {
				return di.BuildName < dj.BuildName
			}
			return di.Category < dj.Category
		})

		filtered := []diagnostic{
			diagnostics[0],
		}
		// builds[i] collects the names of all build configurations that
		// produced filtered[i].
		builds := []map[string]struct{}{
			{diagnostics[0].BuildName: {}},
		}
		for _, diag := range diagnostics[1:] {
			// We may encounter duplicate diagnostics because one file
			// can be part of many packages, and because multiple
			// build configurations may check the same files.
			if !filtered[len(filtered)-1].equal(diag) {
				if filtered[len(filtered)-1].descriptor() == diag.descriptor() {
					// Diagnostics only differ in build name, track new name
					builds[len(filtered)-1][diag.BuildName] = struct{}{}
				} else {
					filtered = append(filtered, diag)
					builds = append(builds, map[string]struct{}{})
					builds[len(filtered)-1][diag.BuildName] = struct{}{}
				}
			}
		}

		// Rewrite each kept diagnostic's build name to the sorted,
		// comma-separated list of all builds that produced it.
		var names []string
		for i := range filtered {
			names = names[:0]
			for k := range builds[i] {
				names = append(names, k)
			}
			sort.Strings(names)
			filtered[i].BuildName = strings.Join(names, ",")
		}
		diagnostics = filtered
	}

	// Select the output formatter.
	var f formatter
	switch cmd.flags.formatter {
	case "text":
		f = textFormatter{W: os.Stdout}
	case "stylish":
		f = &stylishFormatter{W: os.Stdout}
	case "json":
		f = jsonFormatter{W: os.Stdout}
	case "sarif":
		f = &sarifFormatter{
			driverName:    cmd.name,
			driverVersion: cmd.version,
		}
		if cmd.name == "staticcheck" {
			f.(*sarifFormatter).driverName = "Staticcheck"
			f.(*sarifFormatter).driverWebsite = "https://staticcheck.io"
		}
	case "binary":
		fmt.Fprintln(os.Stderr, "'-f binary' not supported in this context")
		cmd.exit(2)
	case "null":
		f = nullFormatter{}
	default:
		fmt.Fprintf(os.Stderr, "unsupported output format %q\n", cmd.flags.formatter)
		cmd.exit(2)
	}

	// Determine which diagnostic categories should cause a non-zero exit.
	fail := cmd.flags.fail
	analyzerNames := make([]string, len(cs))
	for i, a := range cs {
		analyzerNames[i] = a.Analyzer.Name
	}
	shouldExit := filterAnalyzerNames(analyzerNames, fail)
	shouldExit["staticcheck"] = true
	shouldExit["compile"] = true

	var (
		numErrors   int
		numWarnings int
		numIgnored  int
	)
	// Partition diagnostics into errors/warnings and drop ignored ones
	// (unless -show-ignored is set).
	notIgnored := make([]diagnostic, 0, len(diagnostics))
	for _, diag := range diagnostics {
		if diag.Category == "compile" && cmd.flags.debugNoCompileErrors {
			continue
		}
		if diag.Severity == severityIgnored && !cmd.flags.showIgnored {
			numIgnored++
			continue
		}
		if shouldExit[diag.Category] {
			numErrors++
		} else {
			diag.Severity = severityWarning
			numWarnings++
		}
		notIgnored = append(notIgnored, diag)
	}

	f.Format(cs, notIgnored)
	if f, ok := f.(statter); ok {
		f.Stats(len(diagnostics), numErrors, numWarnings, numIgnored)
	}

	if numErrors > 0 {
		if _, ok := f.(*sarifFormatter); ok {
			// When emitting SARIF, finding errors is considered success.
			cmd.exit(0)
		} else {
			cmd.exit(1)
		}
	}
	cmd.exit(0)
}
// usage returns a function that prints the command's usage information to
// stderr, suitable for use as a flag.FlagSet's Usage hook.
func usage(name string, fs *flag.FlagSet) func() {
	return func() {
		w := os.Stderr
		fmt.Fprintf(w, "Usage: %s [flags] [packages]\n", name)
		fmt.Fprintln(w)
		fmt.Fprintln(w, "Flags:")
		printDefaults(fs)
		fmt.Fprintln(w)
		fmt.Fprintln(w, "For help about specifying packages, see 'go help packages'")
	}
}
// isZeroValue determines whether the string represents the zero
// value for a flag.
//
// this function has been copied from the Go standard library's 'flag' package.
func isZeroValue(f *flag.Flag, value string) bool {
// Build a zero value of the flag's Value type, and see if the
// result of calling its String method equals the value passed in.
// This works unless the Value type is itself an interface type.
typ := reflect.TypeOf(f.Value)
var z reflect.Value
if typ.Kind() == reflect.Ptr {
z = reflect.New(typ.Elem())
} else {
z = reflect.Zero(typ)
}
return value == z.Interface().(flag.Value).String()
}
// printDefaults prints the names, usage strings, and default values of all
// flags in fs to the flag set's output, skipping internal debug flags.
//
// this function has been copied from the Go standard library's 'flag' package and modified to skip debug flags.
func printDefaults(fs *flag.FlagSet) {
	fs.VisitAll(func(f *flag.Flag) {
		// Don't print debug flags
		if strings.HasPrefix(f.Name, "debug.") {
			return
		}
		var b strings.Builder
		fmt.Fprintf(&b, "  -%s", f.Name) // Two spaces before -; see next two comments.
		name, usage := flag.UnquoteUsage(f)
		if len(name) > 0 {
			b.WriteString(" ")
			b.WriteString(name)
		}
		// Boolean flags of one ASCII letter are so common we
		// treat them specially, putting their usage on the same line.
		if b.Len() <= 4 { // space, space, '-', 'x'.
			b.WriteString("\t")
		} else {
			// Four spaces before the tab triggers good alignment
			// for both 4- and 8-space tab stops.
			b.WriteString("\n    \t")
		}
		b.WriteString(strings.ReplaceAll(usage, "\n", "\n    \t"))
		if !isZeroValue(f, f.DefValue) {
			// The flag package's string values have the unexported type
			// *flag.stringValue, so we must identify them via reflection
			// on the pointed-to type. Checking Name/PkgPath on the pointer
			// type itself never matches: unnamed (pointer) types have an
			// empty Name and PkgPath.
			T := reflect.TypeOf(f.Value)
			if T.Kind() == reflect.Ptr && T.Elem().Name() == "stringValue" && T.Elem().PkgPath() == "flag" {
				// put quotes on the value
				fmt.Fprintf(&b, " (default %q)", f.DefValue)
			} else {
				fmt.Fprintf(&b, " (default %v)", f.DefValue)
			}
		}
		fmt.Fprint(fs.Output(), b.String(), "\n")
	})
}

View File

@@ -0,0 +1,105 @@
package lintcmd
import (
"bufio"
"errors"
"fmt"
"io"
"strings"
"unicode"
)
// parseBuildConfigError wraps an error encountered while parsing a build
// matrix, annotated with the line it occurred on.
type parseBuildConfigError struct {
	line int   // 1-based line number in the matrix input
	err  error // the underlying parse error
}

func (err parseBuildConfigError) Error() string { return err.err.Error() }
// parseBuildConfigs parses a build matrix from r, one build configuration
// per line. Blank lines are skipped. Parse failures are reported as
// parseBuildConfigError carrying the 1-based input line number.
func parseBuildConfigs(r io.Reader) ([]BuildConfig, error) {
	var builds []BuildConfig
	br := bufio.NewReader(r)
	lineno := 0
	for {
		line, err := br.ReadString('\n')
		eof := err == io.EOF
		if err != nil && !eof {
			return nil, err
		}
		// ReadString returns any remaining data alongside io.EOF, so we
		// must process the line even on EOF; otherwise a final line
		// without a trailing newline would be silently dropped.
		lineno++
		line = strings.TrimSpace(line)
		if line != "" {
			name, envs, flags, perr := parseBuildConfig(line)
			if perr != nil {
				return nil, parseBuildConfigError{line: lineno, err: perr}
			}
			builds = append(builds, BuildConfig{
				Name:  name,
				Envs:  envs,
				Flags: flags,
			})
		}
		if eof {
			break
		}
	}
	return builds, nil
}
// parseBuildConfig parses a single build matrix line of the form
//
//	name: ENV=value ... -flag ...
//
// Tokens are separated by spaces; double quotes group spaces into one
// token (the quotes themselves are dropped). Tokens before the first
// '-'-prefixed token are environment variables; from the first flag-like
// token onward, everything is treated as a flag. The name may contain
// only letters, numbers, and underscores.
func parseBuildConfig(line string) (name string, envs []string, flags []string, err error) {
	if line == "" {
		return "", nil, nil, errors.New("couldn't parse empty build config")
	}
	if colon := strings.Index(line, ":"); colon == len(line)-1 {
		// "name:" with nothing after it: no envs, no flags.
		name = line[:len(line)-1]
	} else {
		sep := strings.Index(line, ": ")
		if sep == -1 {
			return name, envs, flags, errors.New("missing build name")
		}
		name = line[:sep]

		dest := &envs
		// flush appends the finished token to the current destination,
		// switching (permanently) to flags once a '-'-prefixed token
		// is seen.
		flush := func(tok []rune) {
			if len(tok) == 0 {
				return
			}
			if tok[0] == '-' {
				dest = &flags
			}
			*dest = append(*dest, string(tok))
		}

		var tok []rune
		inQuote := false
		for _, r := range strings.TrimSpace(line[sep+2:]) {
			switch {
			case r == '"':
				inQuote = !inQuote
			case r == ' ' && !inQuote:
				flush(tok)
				tok = tok[:0]
			default:
				tok = append(tok, r)
			}
		}
		if len(tok) > 0 {
			if inQuote {
				return "", nil, nil, errors.New("unterminated quoted string")
			}
			flush(tok)
		}
	}

	if strings.IndexFunc(name, func(r rune) bool {
		return r != '_' && !unicode.IsLetter(r) && !unicode.IsNumber(r)
	}) != -1 {
		return "", nil, nil, fmt.Errorf("invalid build name %q", name)
	}
	return name, envs, flags, nil
}

View File

@@ -0,0 +1,55 @@
package lintcmd
import (
"strings"
"honnef.co/go/tools/lintcmd/runner"
)
// parseDirectives converts serialized linter directives into ignore rules.
// Malformed ignore directives are reported as "compile" diagnostics, and
// directives other than "ignore" and "file-ignore" are skipped.
func parseDirectives(dirs []runner.SerializedDirective) ([]ignore, []diagnostic) {
	var ignores []ignore
	var diagnostics []diagnostic
	for _, dir := range dirs {
		if dir.Command != "ignore" && dir.Command != "file-ignore" {
			// Not a directive we understand; skip it.
			continue
		}
		if len(dir.Arguments) < 2 {
			// Both forms require a list of checks followed by a reason.
			diagnostics = append(diagnostics, diagnostic{
				Diagnostic: runner.Diagnostic{
					Position: dir.NodePosition,
					Message:  "malformed linter directive; missing the required reason field?",
					Category: "compile",
				},
				Severity: severityError,
			})
			continue
		}
		checks := strings.Split(dir.Arguments[0], ",")
		var ig ignore
		if dir.Command == "ignore" {
			ig = &lineIgnore{
				File:   dir.NodePosition.Filename,
				Line:   dir.NodePosition.Line,
				Checks: checks,
				Pos:    dir.DirectivePosition,
			}
		} else {
			ig = &fileIgnore{
				File:   dir.NodePosition.Filename,
				Checks: checks,
			}
		}
		ignores = append(ignores, ig)
	}
	return ignores, diagnostics
}

View File

@@ -0,0 +1,161 @@
package lintcmd
import (
"encoding/json"
"fmt"
"go/token"
"io"
"os"
"path/filepath"
"text/tabwriter"
"honnef.co/go/tools/analysis/lint"
)
func shortPath(path string) string {
cwd, err := os.Getwd()
if err != nil {
return path
}
if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) {
return rel
}
return path
}
// relativePositionString renders pos as "file:line:column", shortening the
// file name relative to the working directory when that is more concise.
// A position with neither file name nor validity renders as "-".
func relativePositionString(pos token.Position) string {
	out := shortPath(pos.Filename)
	if pos.IsValid() {
		if out != "" {
			out += ":"
		}
		out += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
	}
	if out == "" {
		return "-"
	}
	return out
}
// statter is implemented by formatters that can print summary statistics
// after all diagnostics have been formatted.
type statter interface {
	Stats(total, errors, warnings, ignored int)
}

// formatter renders a list of diagnostics for the user.
type formatter interface {
	Format(checks []*lint.Analyzer, diagnostics []diagnostic)
}

// textFormatter prints diagnostics as plain "position: message" lines to W.
type textFormatter struct {
	W io.Writer
}

func (o textFormatter) Format(_ []*lint.Analyzer, ps []diagnostic) {
	for _, p := range ps {
		fmt.Fprintf(o.W, "%s: %s\n", relativePositionString(p.Position), p.String())
		// Related information is printed indented beneath the diagnostic.
		for _, r := range p.Related {
			fmt.Fprintf(o.W, "\t%s: %s\n", relativePositionString(r.Position), r.Message)
		}
	}
}

// nullFormatter discards all diagnostics.
type nullFormatter struct{}

func (nullFormatter) Format([]*lint.Analyzer, []diagnostic) {}

// jsonFormatter prints diagnostics as newline-delimited JSON objects to W.
type jsonFormatter struct {
	W io.Writer
}
// Format writes one JSON object per diagnostic, each containing the check
// code, severity, start and end locations, message, and related locations.
func (o jsonFormatter) Format(_ []*lint.Analyzer, ps []diagnostic) {
	type location struct {
		File   string `json:"file"`
		Line   int    `json:"line"`
		Column int    `json:"column"`
	}
	type related struct {
		Location location `json:"location"`
		End      location `json:"end"`
		Message  string   `json:"message"`
	}
	// loc converts a token.Position into its JSON representation.
	loc := func(pos token.Position) location {
		return location{
			File:   pos.Filename,
			Line:   pos.Line,
			Column: pos.Column,
		}
	}
	enc := json.NewEncoder(o.W)
	for _, p := range ps {
		out := struct {
			Code     string    `json:"code"`
			Severity string    `json:"severity,omitempty"`
			Location location  `json:"location"`
			End      location  `json:"end"`
			Message  string    `json:"message"`
			Related  []related `json:"related,omitempty"`
		}{
			Code:     p.Category,
			Severity: p.Severity.String(),
			Location: loc(p.Position),
			End:      loc(p.End),
			Message:  p.Message,
		}
		for _, r := range p.Related {
			out.Related = append(out.Related, related{
				Location: loc(r.Position),
				End:      loc(r.End),
				Message:  r.Message,
			})
		}
		// Encoding errors are deliberately discarded; there is no useful
		// way to report a failure to write to the output stream.
		_ = enc.Encode(out)
	}
}
// stylishFormatter groups diagnostics by file, printing one header per file
// followed by a column-aligned table of positions, categories, and messages.
type stylishFormatter struct {
	W io.Writer
	// prevFile is the file of the previously printed diagnostic; a change
	// starts a new per-file section.
	prevFile string
	// tw aligns the columns of the current section.
	tw *tabwriter.Writer
}

func (o *stylishFormatter) Format(_ []*lint.Analyzer, ps []diagnostic) {
	for _, p := range ps {
		pos := p.Position
		if pos.Filename == "" {
			pos.Filename = "-"
		}
		if pos.Filename != o.prevFile {
			// Starting a new file: flush the previous table, if any, and
			// print the file name as a section header.
			if o.prevFile != "" {
				o.tw.Flush()
				fmt.Fprintln(o.W)
			}
			fmt.Fprintln(o.W, pos.Filename)
			o.prevFile = pos.Filename
			o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0)
		}
		fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", pos.Line, pos.Column, p.Category, p.Message)
		for _, r := range p.Related {
			fmt.Fprintf(o.tw, " (%d, %d)\t\t %s\n", r.Position.Line, r.Position.Column, r.Message)
		}
	}
}

// Stats implements statter, printing a summary line with the total number
// of problems and their breakdown.
func (o *stylishFormatter) Stats(total, errors, warnings, ignored int) {
	if o.tw != nil {
		o.tw.Flush()
		fmt.Fprintln(o.W)
	}
	fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings, %d ignored)\n",
		total, errors, warnings, ignored)
}

View File

@@ -0,0 +1,577 @@
package lintcmd
import (
"crypto/sha256"
"fmt"
"go/build"
"go/token"
"io"
"os"
"os/signal"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"unicode"
"honnef.co/go/tools/analysis/lint"
"honnef.co/go/tools/config"
"honnef.co/go/tools/go/buildid"
"honnef.co/go/tools/go/loader"
"honnef.co/go/tools/lintcmd/cache"
"honnef.co/go/tools/lintcmd/runner"
"honnef.co/go/tools/unused"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
)
// A linter lints Go source code.
type linter struct {
	// Analyzers maps check names to their analyzers.
	Analyzers map[string]*lint.Analyzer
	// Runner executes analyzers and caches their results.
	Runner *runner.Runner
}
// computeSalt derives a cache salt identifying this exact build of the
// executable: its build ID when readable, otherwise a SHA-256 hash of the
// entire executable file.
func computeSalt() ([]byte, error) {
	exe, err := os.Executable()
	if err != nil {
		return nil, err
	}
	if id, err := buildid.ReadFile(exe); err == nil {
		return []byte(id), nil
	}
	// For some reason we couldn't read the build id from the executable.
	// Fall back to hashing the entire executable.
	f, err := os.Open(exe)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}
// newLinter creates a linter backed by the default artifact cache, salted
// with this executable's identity so cached results are invalidated when
// the binary changes. The caller is expected to populate Analyzers.
func newLinter(cfg config.Config) (*linter, error) {
	c, err := cache.Default()
	if err != nil {
		return nil, err
	}
	salt, err := computeSalt()
	if err != nil {
		return nil, fmt.Errorf("could not compute salt for cache: %s", err)
	}
	c.SetSalt(salt)
	r, err := runner.New(cfg, c)
	if err != nil {
		return nil, err
	}
	r.FallbackGoVersion = defaultGoVersion()
	return &linter{
		Runner: r,
	}, nil
}

// LintResult is the outcome of linting a set of packages under a single
// build configuration.
type LintResult struct {
	// CheckedFiles lists the files that were analyzed.
	CheckedFiles []string
	// Diagnostics holds all reported problems, including ignored ones
	// (marked via their Severity).
	Diagnostics []diagnostic
	// Warnings are non-fatal problems encountered while linting.
	Warnings []string
}
// Lint loads the packages matched by patterns under cfg, runs all of the
// linter's analyzers on them, and collects diagnostics, checked files, and
// warnings. Unused-object (U1000) results are merged across all analyzed
// packages: an object is only reported unused if no result saw it used.
func (l *linter) Lint(cfg *packages.Config, patterns []string) (LintResult, error) {
	var out LintResult
	as := make([]*analysis.Analyzer, 0, len(l.Analyzers))
	for _, a := range l.Analyzers {
		as = append(as, a.Analyzer)
	}
	results, err := l.Runner.Run(cfg, as, patterns)
	if err != nil {
		return out, err
	}
	if len(results) == 0 {
		// TODO(dh): emulate Go's behavior more closely once we have
		// access to go list's Match field.
		for _, pattern := range patterns {
			fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern)
		}
	}
	analyzerNames := make([]string, 0, len(l.Analyzers))
	for name := range l.Analyzers {
		analyzerNames = append(analyzerNames, name)
	}
	// used records, per merge key, whether any result saw the object used;
	// unuseds collects all candidates for a U1000 diagnostic.
	used := map[unusedKey]bool{}
	var unuseds []unusedPair
	for _, res := range results {
		if len(res.Errors) > 0 && !res.Failed {
			panic("package has errors but isn't marked as failed")
		}
		if res.Failed {
			out.Diagnostics = append(out.Diagnostics, failed(res)...)
		} else {
			if res.Skipped {
				out.Warnings = append(out.Warnings, fmt.Sprintf("skipped package %s because it is too large", res.Package))
				continue
			}
			// Only report on initial packages, not their dependencies.
			if !res.Initial {
				continue
			}
			out.CheckedFiles = append(out.CheckedFiles, res.Package.GoFiles...)
			allowedAnalyzers := filterAnalyzerNames(analyzerNames, res.Config.Checks)
			resd, err := res.Load()
			if err != nil {
				return out, err
			}
			ps := success(allowedAnalyzers, resd)
			filtered, err := filterIgnored(ps, resd, allowedAnalyzers)
			if err != nil {
				return out, err
			}
			// OPT move this code into the 'success' function.
			for i, diag := range filtered {
				a := l.Analyzers[diag.Category]
				// Some diag.Category don't map to analyzers, such as "staticcheck"
				if a != nil {
					filtered[i].MergeIf = a.Doc.MergeIf
				}
			}
			out.Diagnostics = append(out.Diagnostics, filtered...)
			for _, obj := range resd.Unused.Used {
				// FIXME(dh): pick the object whose filename does not include $GOROOT
				key := unusedKey{
					pkgPath: res.Package.PkgPath,
					base:    filepath.Base(obj.Position.Filename),
					line:    obj.Position.Line,
					name:    obj.Name,
				}
				used[key] = true
			}
			if allowedAnalyzers["U1000"] {
				for _, obj := range resd.Unused.Unused {
					key := unusedKey{
						pkgPath: res.Package.PkgPath,
						base:    filepath.Base(obj.Position.Filename),
						line:    obj.Position.Line,
						name:    obj.Name,
					}
					unuseds = append(unuseds, unusedPair{key, obj})
					// Don't overwrite a true entry from another result.
					if _, ok := used[key]; !ok {
						used[key] = false
					}
				}
			}
		}
	}
	// Emit U1000 diagnostics for objects no result saw used.
	for _, uo := range unuseds {
		if uo.obj.Kind == "type param" {
			// We don't currently flag unused type parameters on used objects, and flagging them on unused objects isn't
			// useful.
			continue
		}
		if used[uo.key] {
			continue
		}
		if uo.obj.InGenerated {
			continue
		}
		out.Diagnostics = append(out.Diagnostics, diagnostic{
			Diagnostic: runner.Diagnostic{
				Position: uo.obj.DisplayPosition,
				Message:  fmt.Sprintf("%s %s is unused", uo.obj.Kind, uo.obj.Name),
				Category: "U1000",
			},
			MergeIf: lint.MergeIfAll,
		})
	}
	return out, nil
}
// filterIgnored applies the linter directives found in res to diagnostics,
// marking matched diagnostics with severityIgnored. It also emits a
// "staticcheck" diagnostic for every line ignore that could have matched
// one of the enabled checks but didn't, plus any diagnostics produced
// while parsing the directives themselves.
func filterIgnored(diagnostics []diagnostic, res runner.ResultData, allowedAnalyzers map[string]bool) ([]diagnostic, error) {
	// couldHaveMatched reports whether an unmatched line ignore is worth
	// complaining about.
	couldHaveMatched := func(ig *lineIgnore) bool {
		for _, c := range ig.Checks {
			if c == "U1000" {
				// We never want to flag ignores for U1000,
				// because U1000 isn't local to a single
				// package. For example, an identifier may
				// only be used by tests, in which case an
				// ignore would only fire when not analyzing
				// tests. To avoid spurious "useless ignore"
				// warnings, just never flag U1000.
				return false
			}
			// Even though the runner always runs all analyzers, we
			// still only flag unmatched ignores for the set of
			// analyzers the user has expressed interest in. That way,
			// `staticcheck -checks=SA1000` won't complain about an
			// unmatched ignore for an unrelated check.
			if allowedAnalyzers[c] {
				return true
			}
		}
		return false
	}
	ignores, moreDiagnostics := parseDirectives(res.Directives)
	for _, ig := range ignores {
		for i := range diagnostics {
			diag := &diagnostics[i]
			if ig.Match(*diag) {
				diag.Severity = severityIgnored
			}
		}
		if ig, ok := ig.(*lineIgnore); ok && !ig.Matched && couldHaveMatched(ig) {
			diag := diagnostic{
				Diagnostic: runner.Diagnostic{
					Position: ig.Pos,
					Message:  "this linter directive didn't match anything; should it be removed?",
					Category: "staticcheck",
				},
			}
			moreDiagnostics = append(moreDiagnostics, diag)
		}
	}
	return append(diagnostics, moreDiagnostics...), nil
}
// ignore is a directive-based suppression rule that can match diagnostics.
type ignore interface {
	Match(diag diagnostic) bool
}

// lineIgnore suppresses diagnostics of the given checks on a single line
// of a single file.
type lineIgnore struct {
	File   string
	Line   int
	Checks []string
	// Matched records whether this ignore suppressed at least one
	// diagnostic; unmatched ignores may be reported to the user.
	Matched bool
	// Pos is the position of the directive itself.
	Pos token.Position
}

// Match reports whether the diagnostic is on the ignore's file and line
// and its category matches one of the check patterns (filepath.Match
// syntax). A successful match is recorded in li.Matched.
func (li *lineIgnore) Match(p diagnostic) bool {
	pos := p.Position
	if pos.Filename != li.File || pos.Line != li.Line {
		return false
	}
	for _, c := range li.Checks {
		if m, _ := filepath.Match(c, p.Category); m {
			li.Matched = true
			return true
		}
	}
	return false
}

// String renders the ignore and whether it has matched anything yet.
func (li *lineIgnore) String() string {
	matched := "not matched"
	if li.Matched {
		matched = "matched"
	}
	return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched)
}

// fileIgnore suppresses diagnostics of the given checks in an entire file.
type fileIgnore struct {
	File   string
	Checks []string
}

// Match reports whether the diagnostic is in the ignored file and its
// category matches one of the check patterns.
func (fi *fileIgnore) Match(p diagnostic) bool {
	if p.Position.Filename != fi.File {
		return false
	}
	for _, c := range fi.Checks {
		if m, _ := filepath.Match(c, p.Category); m {
			return true
		}
	}
	return false
}
// severity classifies how a diagnostic is treated when reporting.
type severity uint8

const (
	// severityError marks diagnostics that fail the run.
	severityError severity = iota
	// severityWarning marks diagnostics that are reported without
	// failing the run.
	severityWarning
	// severityIgnored marks diagnostics suppressed by an ignore directive.
	severityIgnored
)

// String returns the human-readable name of the severity.
func (s severity) String() string {
	names := [...]string{
		severityError:   "error",
		severityWarning: "warning",
		severityIgnored: "ignored",
	}
	if int(s) < len(names) {
		return names[s]
	}
	return fmt.Sprintf("Severity(%d)", s)
}
// diagnostic represents a diagnostic in some source code.
type diagnostic struct {
	runner.Diagnostic
	// Severity classifies the diagnostic as error, warning, or ignored.
	Severity severity
	// MergeIf controls how the diagnostic is merged across build
	// configurations.
	MergeIf lint.MergeStrategy
	// BuildName names the build configuration(s) that produced the
	// diagnostic, if any.
	BuildName string
}

// equal reports whether two diagnostics are identical in position,
// content, and metadata.
func (p diagnostic) equal(o diagnostic) bool {
	return p.Position == o.Position &&
		p.End == o.End &&
		p.Message == o.Message &&
		p.Category == o.Category &&
		p.Severity == o.Severity &&
		p.MergeIf == o.MergeIf &&
		p.BuildName == o.BuildName
}

// String renders the diagnostic as "message [build] (category)", omitting
// the build name when empty.
func (p *diagnostic) String() string {
	if p.BuildName != "" {
		return fmt.Sprintf("%s [%s] (%s)", p.Message, p.BuildName, p.Category)
	} else {
		return fmt.Sprintf("%s (%s)", p.Message, p.Category)
	}
}
// failed converts the load/type-check errors of a failed package into
// "compile" diagnostics with severityError, recovering position
// information from the error text when go/packages doesn't provide it.
func failed(res runner.Result) []diagnostic {
	var diagnostics []diagnostic
	for _, e := range res.Errors {
		switch e := e.(type) {
		case packages.Error:
			msg := e.Msg
			if len(msg) != 0 && msg[0] == '\n' {
				// TODO(dh): See https://github.com/golang/go/issues/32363
				msg = msg[1:]
			}
			var posn token.Position
			if e.Pos == "" {
				// Under certain conditions (malformed package
				// declarations, multiple packages in the same
				// directory), go list emits an error on stderr
				// instead of JSON. Those errors do not have
				// associated position information in
				// go/packages.Error, even though the output on
				// stderr may contain it.
				if p, n, err := parsePos(msg); err == nil {
					if abs, err := filepath.Abs(p.Filename); err == nil {
						p.Filename = abs
					}
					posn = p
					// n+2 skips the ": " separator after the position.
					msg = msg[n+2:]
				}
			} else {
				var err error
				posn, _, err = parsePos(e.Pos)
				if err != nil {
					panic(fmt.Sprintf("internal error: %s", e))
				}
			}
			diag := diagnostic{
				Diagnostic: runner.Diagnostic{
					Position: posn,
					Message:  msg,
					Category: "compile",
				},
				Severity: severityError,
			}
			diagnostics = append(diagnostics, diag)
		case error:
			diag := diagnostic{
				Diagnostic: runner.Diagnostic{
					Position: token.Position{},
					Message:  e.Error(),
					Category: "compile",
				},
				Severity: severityError,
			}
			diagnostics = append(diagnostics, diag)
		}
	}
	return diagnostics
}

// unusedKey identifies an object for the purpose of merging U1000 results
// across packages and build configurations. It uses the file's base name
// rather than its full path — presumably so the same file seen under
// different roots compares equal (see the FIXME about $GOROOT above).
type unusedKey struct {
	pkgPath string
	base    string
	line    int
	name    string
}

// unusedPair pairs a potentially unused object with its merge key.
type unusedPair struct {
	key unusedKey
	obj unused.SerializedObject
}
// success converts the raw diagnostics of a successfully analyzed package
// into our diagnostic type, dropping diagnostics of checks the user hasn't
// enabled.
func success(allowedAnalyzers map[string]bool, res runner.ResultData) []diagnostic {
	var out []diagnostic
	for _, d := range res.Diagnostics {
		if !allowedAnalyzers[d.Category] {
			continue
		}
		out = append(out, diagnostic{Diagnostic: d})
	}
	return out
}
// defaultGoVersion returns the version of the Go release that built this
// binary, e.g. "1.18", derived from the newest release tag in go/build.
func defaultGoVersion() string {
	tags := build.Default.ReleaseTags
	newest := tags[len(tags)-1]
	// Release tags look like "go1.18"; strip the "go" prefix.
	return newest[2:]
}
// filterAnalyzerNames computes the enabled/disabled state of checks from a
// list of patterns, applied in order. A pattern is either "all"/"*" (every
// known check), a glob ending in "*" (a category like "S*" when the prefix
// has no digits, otherwise a plain prefix like "S1*"), or a literal check
// name. A leading '-' disables instead of enables.
func filterAnalyzerNames(analyzers []string, checks []string) map[string]bool {
	allowed := map[string]bool{}
	// firstDigit returns the index of the first numeric rune in s, or -1.
	firstDigit := func(s string) int {
		return strings.IndexFunc(s, unicode.IsNumber)
	}
	for _, check := range checks {
		enable := true
		if len(check) > 1 && check[0] == '-' {
			enable = false
			check = check[1:]
		}
		switch {
		case check == "*" || check == "all":
			// Enable or disable every known check.
			for _, name := range analyzers {
				allowed[name] = enable
			}
		case strings.HasSuffix(check, "*"):
			prefix := strings.TrimSuffix(check, "*")
			if firstDigit(prefix) == -1 {
				// Glob is S*, which should match S1000 but not SA1000:
				// compare the whole category (the part before the first
				// digit) against the prefix.
				for _, name := range analyzers {
					if name[:firstDigit(name)] == prefix {
						allowed[name] = enable
					}
				}
			} else {
				// Glob is S1*: a plain prefix match.
				for _, name := range analyzers {
					if strings.HasPrefix(name, prefix) {
						allowed[name] = enable
					}
				}
			}
		default:
			// Literal check name.
			allowed[check] = enable
		}
	}
	return allowed
}
// posRe matches "file:line" or "file:line:column" at the start of a string.
var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?`)

// parsePos parses a textual position of the form file:line[:column],
// returning the position and the number of bytes of pos consumed by it.
// "-" and the empty string parse to the zero position.
func parsePos(pos string) (token.Position, int, error) {
	switch pos {
	case "-", "":
		return token.Position{}, 0, nil
	}
	m := posRe.FindStringSubmatch(pos)
	if m == nil {
		return token.Position{}, 0, fmt.Errorf("internal error: malformed position %q", pos)
	}
	// Conversion errors are ignored: the regexp guarantees digits, and an
	// absent column leaves it at 0.
	line, _ := strconv.Atoi(m[2])
	col, _ := strconv.Atoi(m[3])
	p := token.Position{
		Filename: m[1],
		Line:     line,
		Column:   col,
	}
	return p, len(m[0]), nil
}
// options bundles the configurable aspects of a single lint run.
type options struct {
	// Config is the lint configuration (e.g. the set of checks).
	Config config.Config
	// BuildConfig supplies build flags and environment variables.
	BuildConfig BuildConfig
	// LintTests controls whether test files are analyzed, too.
	LintTests bool
	// GoVersion is the targeted Go language version.
	GoVersion string
	// PrintAnalyzerMeasurement, if set, is called with the time each
	// analyzer spent on each package.
	PrintAnalyzerMeasurement func(analysis *analysis.Analyzer, pkg *loader.PackageSpec, d time.Duration)
}

// doLint runs the given analyzers on the given package patterns under a
// single build configuration, annotating every resulting diagnostic with
// that configuration's name. While linting, it prints progress to stderr
// whenever one of the platform's info signals (see infoSignals) arrives.
func doLint(as []*lint.Analyzer, paths []string, opt *options) (LintResult, error) {
	if opt == nil {
		opt = &options{}
	}
	l, err := newLinter(opt.Config)
	if err != nil {
		return LintResult{}, err
	}
	analyzers := make(map[string]*lint.Analyzer, len(as))
	for _, a := range as {
		analyzers[a.Analyzer.Name] = a
	}
	l.Analyzers = analyzers
	l.Runner.GoVersion = opt.GoVersion
	l.Runner.Stats.PrintAnalyzerMeasurement = opt.PrintAnalyzerMeasurement
	cfg := &packages.Config{}
	if opt.LintTests {
		cfg.Tests = true
	}
	cfg.BuildFlags = opt.BuildConfig.Flags
	cfg.Env = append(os.Environ(), opt.BuildConfig.Envs...)
	// printStats reports the runner's current state to stderr.
	printStats := func() {
		// Individual stats are read atomically, but overall there
		// is no synchronisation. For printing rough progress
		// information, this doesn't matter.
		switch l.Runner.Stats.State() {
		case runner.StateInitializing:
			fmt.Fprintln(os.Stderr, "Status: initializing")
		case runner.StateLoadPackageGraph:
			fmt.Fprintln(os.Stderr, "Status: loading package graph")
		case runner.StateBuildActionGraph:
			fmt.Fprintln(os.Stderr, "Status: building action graph")
		case runner.StateProcessing:
			fmt.Fprintf(os.Stderr, "Packages: %d/%d initial, %d/%d total; Workers: %d/%d\n",
				l.Runner.Stats.ProcessedInitialPackages(),
				l.Runner.Stats.InitialPackages(),
				l.Runner.Stats.ProcessedPackages(),
				l.Runner.Stats.TotalPackages(),
				l.Runner.ActiveWorkers(),
				l.Runner.TotalWorkers(),
			)
		case runner.StateFinalizing:
			fmt.Fprintln(os.Stderr, "Status: finalizing")
		}
	}
	if len(infoSignals) > 0 {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, infoSignals...)
		defer signal.Stop(ch)
		go func() {
			for range ch {
				printStats()
			}
		}()
	}
	res, err := l.Lint(cfg, paths)
	for i := range res.Diagnostics {
		res.Diagnostics[i].BuildName = opt.BuildConfig.Name
	}
	return res, err
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,49 @@
package runner
import (
"sync/atomic"
"time"
"honnef.co/go/tools/go/loader"
"golang.org/x/tools/go/analysis"
)
// States of a lint run, reported via Stats.State in the order in which
// they occur.
const (
	StateInitializing = iota
	StateLoadPackageGraph
	StateBuildActionGraph
	StateProcessing
	StateFinalizing
)

// Stats tracks the progress of a run. All counters are stored and read
// atomically, so Stats may be polled from other goroutines.
type Stats struct {
	state                    uint32
	initialPackages          uint32
	totalPackages            uint32
	processedPackages        uint32
	processedInitialPackages uint32

	// optional function to call every time an analyzer has finished analyzing a package.
	PrintAnalyzerMeasurement func(*analysis.Analyzer, *loader.PackageSpec, time.Duration)
}

func (s *Stats) setState(state uint32)    { atomic.StoreUint32(&s.state, state) }
func (s *Stats) State() int               { return int(atomic.LoadUint32(&s.state)) }
func (s *Stats) setInitialPackages(n int) { atomic.StoreUint32(&s.initialPackages, uint32(n)) }
func (s *Stats) InitialPackages() int     { return int(atomic.LoadUint32(&s.initialPackages)) }
func (s *Stats) setTotalPackages(n int)   { atomic.StoreUint32(&s.totalPackages, uint32(n)) }
func (s *Stats) TotalPackages() int       { return int(atomic.LoadUint32(&s.totalPackages)) }

func (s *Stats) finishPackage()         { atomic.AddUint32(&s.processedPackages, 1) }
func (s *Stats) finishInitialPackage()  { atomic.AddUint32(&s.processedInitialPackages, 1) }
func (s *Stats) ProcessedPackages() int { return int(atomic.LoadUint32(&s.processedPackages)) }
func (s *Stats) ProcessedInitialPackages() int {
	return int(atomic.LoadUint32(&s.processedInitialPackages))
}

// measureAnalyzer forwards a per-(analyzer, package) timing measurement to
// the optional PrintAnalyzerMeasurement callback.
func (s *Stats) measureAnalyzer(analysis *analysis.Analyzer, pkg *loader.PackageSpec, d time.Duration) {
	if s.PrintAnalyzerMeasurement != nil {
		s.PrintAnalyzerMeasurement(analysis, pkg, d)
	}
}

View File

@@ -0,0 +1,370 @@
package lintcmd
// Notes on GitHub-specific restrictions:
//
// Result.Message needs to either have ID or Text set. Markdown
// gets ignored. Text isn't treated verbatim however: Markdown
// formatting gets stripped, except for links.
//
// GitHub does not display RelatedLocations. The only way to make
// use of them is to link to them (via their ID) in the
// Result.Message. And even then, it will only show the referred
// line of code, not the message. We can duplicate the messages in
// the Result.Message, but we can't even indent them, because
// leading whitespace gets stripped.
//
// GitHub does use the Markdown version of rule help, but it
// renders it the way it renders comments on issues — that is, it
// turns line breaks into hard line breaks, even though it
// shouldn't.
//
// GitHub doesn't make use of the tool's URI or version, nor of
// the help URIs of rules.
//
// There does not seem to be a way of using SARIF for "normal" CI,
// without results showing up as code scanning alerts. Also, a
// SARIF file containing only warnings, no errors, will not fail
// CI by default, but this is configurable.
// GitHub does display some parts of SARIF results in PRs, but
// most of the useful parts of SARIF, such as help text of rules,
// is only accessible via the code scanning alerts, which are only
// accessible by users with write permissions.
//
// Result.Suppressions is being ignored.
//
//
// Notes on other tools
//
// VS Code Sarif viewer
//
// The Sarif viewer in VS Code displays the full message in the
// tabular view, removing newlines. That makes our multi-line
// messages (which we use as a workaround for missing related
// information) very ugly.
//
// Much like GitHub, the Sarif viewer does not make related
// information visible unless we explicitly refer to it in the
// message.
//
// Suggested fixes are not exposed in any way.
//
// It only shows the shortDescription or fullDescription of a
// rule, not its help. We can't put the help in fullDescription,
// because the fullDescription isn't meant to be that long. For
// example, GitHub displays it in a single line, under the
// shortDescription.
//
// VS Code can filter based on Result.Suppressions, but it doesn't
// display our suppression message. Also, by default, suppressed
// results get shown, and the column indicating that a result is
// suppressed is hidden, which makes for a confusing experience.
//
// When a rule has only an ID, no name, VS Code displays a
// prominent dash in place of the name. When the name and ID are
// identical, it prints both. However, we can't make them
// identical, as SARIF requires that either the ID and name are
// different, or that the name is omitted.
// FIXME(dh): we're currently reporting column information using UTF-8
// byte offsets, not using Unicode code points or UTF-16, which are
// the only two ways allowed by SARIF.
// TODO(dh): set properties.tags — we can use different tags for the
// staticcheck, simple, stylecheck and unused checks, so users can
// filter their results
import (
"encoding/json"
"fmt"
"net/url"
"os"
"path/filepath"
"regexp"
"strings"
"honnef.co/go/tools/analysis/lint"
"honnef.co/go/tools/sarif"
)
// sarifFormatter emits diagnostics as a SARIF log on standard output.
// The driver fields identify the tool (name, version, website) in the
// SARIF run's tool component.
type sarifFormatter struct {
	driverName    string
	driverVersion string
	driverWebsite string
}
// sarifLevel maps one of our severity levels onto the SARIF "level"
// property of a reporting configuration.
func sarifLevel(severity lint.Severity) string {
	switch severity {
	case lint.SeverityError:
		return "error"
	case lint.SeverityNone, lint.SeverityDeprecated, lint.SeverityWarning:
		// SeverityNone means no severity was configured; default to warning.
		return "warning"
	case lint.SeverityInfo, lint.SeverityHint:
		return "note"
	default:
		// unreachable
		return "none"
	}
}
// encodePath percent-encodes a relative file path so it can be used
// as a URI reference.
func encodePath(path string) string {
	u := url.URL{Path: path}
	return u.EscapedPath()
}
// sarifURI turns an absolute file path into a file:// URI.
func sarifURI(path string) string {
	return (&url.URL{
		Scheme: "file",
		Path:   path,
	}).String()
}
// sarifArtifactLocation converts a file name into a SARIF artifact
// location. Relative paths are expressed against %SRCROOT% so that
// GitHub can resolve them; absolute paths fall back to file:// URIs.
func sarifArtifactLocation(name string) sarif.ArtifactLocation {
	// Ideally we use relative paths so that GitHub can resolve them
	name = shortPath(name)
	if !filepath.IsAbs(name) {
		return sarif.ArtifactLocation{
			URI:       encodePath(name),
			URIBaseID: "%SRCROOT%", // this base ID is specific to GitHub
		}
	}
	return sarif.ArtifactLocation{
		URI: sarifURI(name),
	}
}
// sarifFormatText prepares plain-text documentation for SARIF
// consumers: hard line breaks inside paragraphs are collapsed into
// spaces (GitHub doesn't ignore line breaks, even though it should),
// while breaks around blank lines, quotations and indented code are
// kept. The result is then run through convertCodeBlocks.
func sarifFormatText(s string) string {
	lines := strings.Split(s, "\n")
	last := len(lines) - 1
	var b strings.Builder
	for i, line := range lines {
		b.WriteString(line)
		if i == last {
			break
		}
		// Decide whether this line break survives or collapses into a space.
		switch {
		case line == "":
			b.WriteString("\n")
		case lines[i+1] == "", strings.HasPrefix(line, "> "), strings.HasPrefix(line, "    "):
			b.WriteString("\n")
		default:
			b.WriteString(" ")
		}
	}
	return convertCodeBlocks(b.String())
}
// moreCodeFollows reports whether the next non-blank line in lines is
// indented code (four leading spaces). It returns false if only blank
// lines remain.
func moreCodeFollows(lines []string) bool {
	for _, l := range lines {
		switch {
		case l == "":
			continue
		case strings.HasPrefix(l, "    "):
			return true
		default:
			return false
		}
	}
	return false
}
// alpha matches lines consisting solely of ASCII letters and spaces;
// such a line, preceded by at least two blank lines, is treated as a
// section heading.
var alpha = regexp.MustCompile(`^[a-zA-Z ]+$`)

// convertCodeBlocks renders pre-formatted documentation text as
// Markdown: runs of four-space-indented lines become fenced Go code
// blocks, and a short all-letter line after two or more blank lines
// becomes an H2 heading. All other lines pass through unchanged.
func convertCodeBlocks(text string) string {
	var buf strings.Builder
	lines := strings.Split(text, "\n")
	inCode := false
	empties := 0
	for i, line := range lines {
		// Close an open code fence as soon as no further indented code
		// follows. (The original had a redundant nested inCode check here.)
		if inCode && !moreCodeFollows(lines[i:]) {
			fmt.Fprintln(&buf, "```")
			inCode = false
		}
		prevEmpties := empties
		// Blank lines outside code blocks accumulate; any other line
		// resets the counter.
		if line == "" && !inCode {
			empties++
		} else {
			empties = 0
		}
		if line == "" {
			fmt.Fprintln(&buf)
			continue
		}
		if strings.HasPrefix(line, "    ") {
			// Strip the indentation; open a fence on the first code line.
			line = line[4:]
			if !inCode {
				fmt.Fprintln(&buf, "```go")
				inCode = true
			}
		}
		if !inCode && prevEmpties >= 2 && alpha.MatchString(line) {
			fmt.Fprintf(&buf, "## %s\n", line)
		} else {
			fmt.Fprintln(&buf, line)
		}
	}
	if inCode {
		fmt.Fprintln(&buf, "```")
	}
	return buf.String()
}
// Format writes the checks and diagnostics as a single SARIF log to
// standard output: one run, with one reporting descriptor (rule) per
// check and one result per diagnostic, including suggested fixes,
// related locations, and suppression information.
func (o *sarifFormatter) Format(checks []*lint.Analyzer, diagnostics []diagnostic) {
	// TODO(dh): some diagnostics shouldn't be reported as results. For example, when the user specifies a package on the command line that doesn't exist.
	cwd, _ := os.Getwd()
	run := sarif.Run{
		Tool: sarif.Tool{
			Driver: sarif.ToolComponent{
				Name:           o.driverName,
				Version:        o.driverVersion,
				InformationURI: o.driverWebsite,
			},
		},
		Invocations: []sarif.Invocation{{
			Arguments: os.Args[1:],
			WorkingDirectory: sarif.ArtifactLocation{
				URI: sarifURI(cwd),
			},
			ExecutionSuccessful: true,
		}},
	}
	// Describe every check as a SARIF rule.
	for _, c := range checks {
		run.Tool.Driver.Rules = append(run.Tool.Driver.Rules,
			sarif.ReportingDescriptor{
				// We don't set Name, as Name and ID mustn't be identical.
				ID: c.Analyzer.Name,
				ShortDescription: sarif.Message{
					Text:     c.Doc.Title,
					Markdown: c.Doc.TitleMarkdown,
				},
				HelpURI: "https://staticcheck.io/docs/checks#" + c.Analyzer.Name,
				// We use our markdown as the plain text version, too. We
				// use very little markdown, primarily quotations,
				// indented code blocks and backticks. All of these are
				// fine as plain text, too.
				Help: sarif.Message{
					Text:     sarifFormatText(c.Doc.Format(false)),
					Markdown: sarifFormatText(c.Doc.FormatMarkdown(false)),
				},
				DefaultConfiguration: sarif.ReportingConfiguration{
					// TODO(dh): we could figure out which checks were disabled globally
					Enabled: true,
					Level:   sarifLevel(c.Doc.Severity),
				},
			})
	}
	// Convert each diagnostic into a SARIF result.
	for _, p := range diagnostics {
		r := sarif.Result{
			RuleID: p.Category,
			Kind:   sarif.Fail,
			Message: sarif.Message{
				Text: p.Message,
			},
		}
		r.Locations = []sarif.Location{{
			PhysicalLocation: sarif.PhysicalLocation{
				ArtifactLocation: sarifArtifactLocation(p.Position.Filename),
				Region: sarif.Region{
					StartLine:   p.Position.Line,
					StartColumn: p.Position.Column,
					EndLine:     p.End.Line,
					EndColumn:   p.End.Column,
				},
			},
		}}
		// Attach suggested fixes, grouping the text edits by file.
		for _, fix := range p.SuggestedFixes {
			sfix := sarif.Fix{
				Description: sarif.Message{
					Text: fix.Message,
				},
			}
			// file name -> replacements
			changes := map[string][]sarif.Replacement{}
			for _, edit := range fix.TextEdits {
				changes[edit.Position.Filename] = append(changes[edit.Position.Filename], sarif.Replacement{
					DeletedRegion: sarif.Region{
						StartLine:   edit.Position.Line,
						StartColumn: edit.Position.Column,
						EndLine:     edit.End.Line,
						EndColumn:   edit.End.Column,
					},
					InsertedContent: sarif.ArtifactContent{
						Text: string(edit.NewText),
					},
				})
			}
			for path, replacements := range changes {
				sfix.ArtifactChanges = append(sfix.ArtifactChanges, sarif.ArtifactChange{
					ArtifactLocation: sarifArtifactLocation(path),
					Replacements:     replacements,
				})
			}
			r.Fixes = append(r.Fixes, sfix)
		}
		// GitHub does not display related locations (see the notes at
		// the top of this file), so we additionally link to them from
		// the result message by their ID.
		for i, related := range p.Related {
			r.Message.Text += fmt.Sprintf("\n\t[%s](%d)", related.Message, i+1)
			r.RelatedLocations = append(r.RelatedLocations,
				sarif.Location{
					ID: i + 1,
					Message: &sarif.Message{
						Text: related.Message,
					},
					PhysicalLocation: sarif.PhysicalLocation{
						ArtifactLocation: sarifArtifactLocation(related.Position.Filename),
						Region: sarif.Region{
							StartLine:   related.Position.Line,
							StartColumn: related.Position.Column,
							EndLine:     related.End.Line,
							EndColumn:   related.End.Column,
						},
					},
				})
		}
		if p.Severity == severityIgnored {
			// Note that GitHub does not support suppressions, which is why Staticcheck still requires the -show-ignored flag to be set for us to emit ignored diagnostics.
			r.Suppressions = []sarif.Suppression{{
				Kind: "inSource",
				// TODO(dh): populate the Justification field
			}}
		} else {
			// We want an empty slice, not nil. SARIF differentiates
			// between the two. An empty slice means that the diagnostic
			// wasn't suppressed, while nil means that we don't have the
			// information available.
			r.Suppressions = []sarif.Suppression{}
		}
		run.Results = append(run.Results, r)
	}
	// NOTE(review): the Encode error is discarded, so a write failure to
	// stdout goes unnoticed — consider surfacing it to the caller.
	json.NewEncoder(os.Stdout).Encode(sarif.Log{
		Version: sarif.Version,
		Schema:  sarif.Schema,
		Runs:    []sarif.Run{run},
	})
}

View File

@@ -0,0 +1,8 @@
//go:build !aix && !android && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
// +build !aix,!android,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris

package lintcmd

import "os"

// infoSignals lists the OS signals on which progress information is
// printed; on platforms without a suitable user signal it is empty.
var infoSignals = []os.Signal{}

View File

@@ -0,0 +1,11 @@
//go:build darwin || dragonfly || freebsd || netbsd || openbsd
// +build darwin dragonfly freebsd netbsd openbsd

package lintcmd

import (
	"os"
	"syscall"
)

// infoSignals lists the OS signals on which progress information is
// printed; BSD-like systems provide SIGINFO (typically Ctrl-T) for this.
var infoSignals = []os.Signal{syscall.SIGINFO}

View File

@@ -0,0 +1,11 @@
//go:build aix || android || linux || solaris
// +build aix android linux solaris

package lintcmd

import (
	"os"
	"syscall"
)

// infoSignals lists the OS signals on which progress information is
// printed; these platforms lack SIGINFO, so SIGUSR1 is used instead.
var infoSignals = []os.Signal{syscall.SIGUSR1}

View File

@@ -0,0 +1,44 @@
package version
import (
"fmt"
"runtime/debug"
)
// printBuildInfo writes the main module and all of its dependencies, as
// recorded by the Go toolchain in the running binary, to standard output.
func printBuildInfo() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("Built without Go modules")
		return
	}
	fmt.Println("Main module:")
	printModule(&info.Main)
	fmt.Println("Dependencies:")
	for _, dep := range info.Deps {
		printModule(dep)
	}
}
// buildInfoVersion returns the main module's version as recorded by the
// Go toolchain, and reports whether a real (non-devel) version is known.
func buildInfoVersion() (string, bool) {
	if info, ok := debug.ReadBuildInfo(); ok && info.Main.Version != "(devel)" {
		return info.Main.Version, true
	}
	return "", false
}
// printModule writes a single dependency line to standard output:
// path, optional @version, checksum, and replacement path.
func printModule(m *debug.Module) {
	line := "\t" + m.Path
	if m.Version != "(devel)" {
		line += "@" + m.Version
	}
	if m.Sum != "" {
		line += fmt.Sprintf(" (sum: %s)", m.Sum)
	}
	if m.Replace != nil {
		line += fmt.Sprintf(" (replace: %s)", m.Replace.Path)
	}
	fmt.Println(line)
}

View File

@@ -0,0 +1,43 @@
package version
import (
"fmt"
"os"
"path/filepath"
"runtime"
)
// Version is the human-readable version of this release.
const Version = "2022.1.1"

// MachineVersion is the machine-readable version identifier — presumably
// the corresponding module tag; confirm against the release process.
const MachineVersion = "v0.3.1"
// version returns a version descriptor and reports whether the
// version is a known release.
// version returns a version descriptor and reports whether the
// version is a known release.
func version(human, machine string) (human_, machine_ string, known bool) {
	// A value other than "devel" was stamped at build time: known release.
	if human != "devel" {
		return human, machine, true
	}
	// Fall back to whatever the Go toolchain recorded, if anything.
	if v, ok := buildInfoVersion(); ok {
		return v, "", false
	}
	return "devel", "", false
}
// Print writes a one-line version banner for the current binary to
// standard output.
func Print(human, machine string) {
	name := filepath.Base(os.Args[0])
	h, m, release := version(human, machine)
	switch {
	case release:
		fmt.Printf("%s %s (%s)\n", name, h, m)
	case h == "devel":
		fmt.Printf("%s (no version)\n", name)
	default:
		fmt.Printf("%s (devel, %s)\n", name, h)
	}
}
// Verbose prints the version banner followed by the Go toolchain
// version and the recorded module build information.
func Verbose(human, machine string) {
	Print(human, machine)
	fmt.Printf("\nCompiled with Go version: %s\n", runtime.Version())
	printBuildInfo()
}