go.*: Update k8s packages

- update k8s client-go
- update k8s apiextensions-apiserver
- update k8s controller-tools

Signed-off-by: leonnicolas <leonloechner@gmx.de>
Author: leonnicolas
Date: 2022-04-23 11:01:19 +02:00
Parent: e20d13ace0
Commit: 3eaacc01ae
762 changed files with 58552 additions and 14514 deletions

View File

@@ -21,17 +21,17 @@ import (
"sync"
"time"
utilclock "k8s.io/apimachinery/pkg/util/clock"
"k8s.io/utils/clock"
)
// NewExpiring returns an initialized expiring cache.
func NewExpiring() *Expiring {
return NewExpiringWithClock(utilclock.RealClock{})
return NewExpiringWithClock(clock.RealClock{})
}
// NewExpiringWithClock is like NewExpiring but allows passing in a custom
// clock for testing.
func NewExpiringWithClock(clock utilclock.Clock) *Expiring {
func NewExpiringWithClock(clock clock.Clock) *Expiring {
return &Expiring{
clock: clock,
cache: make(map[interface{}]entry),
@@ -40,7 +40,7 @@ func NewExpiringWithClock(clock utilclock.Clock) *Expiring {
// Expiring is a map whose entries expire after a per-entry timeout.
type Expiring struct {
clock utilclock.Clock
clock clock.Clock
// mu protects the below fields
mu sync.RWMutex
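
Note: the switch from the vendored util/clock package to k8s.io/utils/clock does not change the cache's exported surface. A minimal usage sketch, assuming the Set/Get methods that expiring.go defines outside this hunk:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/cache"
)

func main() {
	// NewExpiring now wires in clock.RealClock from k8s.io/utils/clock;
	// callers are unaffected by the import change.
	c := cache.NewExpiring()
	c.Set("token", "abc123", 2*time.Second)

	if v, ok := c.Get("token"); ok {
		fmt.Println("cached value:", v)
	}
}
```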

View File

@@ -17,10 +17,9 @@ limitations under the License.
package cache
import (
"container/list"
"sync"
"time"
"github.com/hashicorp/golang-lru"
)
// Clock defines an interface for obtaining the current time
@@ -39,8 +38,11 @@ type LRUExpireCache struct {
// clock is used to obtain the current time
clock Clock
cache *lru.Cache
lock sync.Mutex
lock sync.Mutex
maxSize int
evictionList list.List
entries map[interface{}]*list.Element
}
// NewLRUExpireCache creates an expiring cache with the given size
@@ -50,15 +52,19 @@ func NewLRUExpireCache(maxSize int) *LRUExpireCache {
// NewLRUExpireCacheWithClock creates an expiring cache with the given size, using the specified clock to obtain the current time.
func NewLRUExpireCacheWithClock(maxSize int, clock Clock) *LRUExpireCache {
cache, err := lru.New(maxSize)
if err != nil {
// if called with an invalid size
panic(err)
if maxSize <= 0 {
panic("maxSize must be > 0")
}
return &LRUExpireCache{
clock: clock,
maxSize: maxSize,
entries: map[interface{}]*list.Element{},
}
return &LRUExpireCache{clock: clock, cache: cache}
}
type cacheEntry struct {
key interface{}
value interface{}
expireTime time.Time
}
@@ -67,7 +73,31 @@ type cacheEntry struct {
func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Duration) {
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Add(key, &cacheEntry{value, c.clock.Now().Add(ttl)})
// Key already exists
oldElement, ok := c.entries[key]
if ok {
c.evictionList.MoveToFront(oldElement)
oldElement.Value.(*cacheEntry).value = value
oldElement.Value.(*cacheEntry).expireTime = c.clock.Now().Add(ttl)
return
}
// Make space if necessary
if c.evictionList.Len() >= c.maxSize {
toEvict := c.evictionList.Back()
c.evictionList.Remove(toEvict)
delete(c.entries, toEvict.Value.(*cacheEntry).key)
}
// Add new entry
entry := &cacheEntry{
key: key,
value: value,
expireTime: c.clock.Now().Add(ttl),
}
element := c.evictionList.PushFront(entry)
c.entries[key] = element
}
// Get returns the value at the specified key from the cache if it exists and is not
@@ -75,28 +105,56 @@ func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Durati
func (c *LRUExpireCache) Get(key interface{}) (interface{}, bool) {
c.lock.Lock()
defer c.lock.Unlock()
e, ok := c.cache.Get(key)
element, ok := c.entries[key]
if !ok {
return nil, false
}
if c.clock.Now().After(e.(*cacheEntry).expireTime) {
c.cache.Remove(key)
if c.clock.Now().After(element.Value.(*cacheEntry).expireTime) {
c.evictionList.Remove(element)
delete(c.entries, key)
return nil, false
}
return e.(*cacheEntry).value, true
c.evictionList.MoveToFront(element)
return element.Value.(*cacheEntry).value, true
}
// Remove removes the specified key from the cache if it exists
func (c *LRUExpireCache) Remove(key interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Remove(key)
element, ok := c.entries[key]
if !ok {
return
}
c.evictionList.Remove(element)
delete(c.entries, key)
}
// Keys returns all the keys in the cache, even if they are expired. Subsequent calls to
// get may return not found. It returns all keys from oldest to newest.
// Keys returns all unexpired keys in the cache.
//
// Keep in mind that subsequent calls to Get() for any of the returned keys
// might return "not found".
//
// Keys are returned ordered from least recently used to most recently used.
func (c *LRUExpireCache) Keys() []interface{} {
c.lock.Lock()
defer c.lock.Unlock()
return c.cache.Keys()
now := c.clock.Now()
val := make([]interface{}, 0, c.evictionList.Len())
for element := c.evictionList.Back(); element != nil; element = element.Prev() {
// Only return unexpired keys
if !now.After(element.Value.(*cacheEntry).expireTime) {
val = append(val, element.Value.(*cacheEntry).key)
}
}
return val
}
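
Note: the rewrite replaces github.com/hashicorp/golang-lru with a plain container/list plus map, which is what lets Keys filter out expired entries and report least- to most-recently-used order. A short sketch of the behaviour exercised by this diff:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/cache"
)

func main() {
	// maxSize must be > 0 after this change; the constructor panics otherwise.
	c := cache.NewLRUExpireCache(2)

	c.Add("a", 1, time.Minute)
	c.Add("b", 2, time.Minute)
	c.Add("c", 3, time.Minute) // evicts "a", the least recently used entry

	if _, ok := c.Get("a"); !ok {
		fmt.Println(`"a" was evicted`)
	}
	fmt.Println(c.Keys()) // unexpired keys only, ordered LRU to MRU: [b c]
}
```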

View File

@@ -1,445 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clock
import (
"sync"
"time"
)
// PassiveClock allows for injecting fake or real clocks into code
// that needs to read the current time but does not support scheduling
// activity in the future.
type PassiveClock interface {
Now() time.Time
Since(time.Time) time.Duration
}
// Clock allows for injecting fake or real clocks into code that
// needs to do arbitrary things based on time.
type Clock interface {
PassiveClock
After(time.Duration) <-chan time.Time
AfterFunc(time.Duration, func()) Timer
NewTimer(time.Duration) Timer
Sleep(time.Duration)
NewTicker(time.Duration) Ticker
}
// RealClock really calls time.Now()
type RealClock struct{}
// Now returns the current time.
func (RealClock) Now() time.Time {
return time.Now()
}
// Since returns time since the specified timestamp.
func (RealClock) Since(ts time.Time) time.Duration {
return time.Since(ts)
}
// After is the same as time.After(d).
func (RealClock) After(d time.Duration) <-chan time.Time {
return time.After(d)
}
// AfterFunc is the same as time.AfterFunc(d, f).
func (RealClock) AfterFunc(d time.Duration, f func()) Timer {
return &realTimer{
timer: time.AfterFunc(d, f),
}
}
// NewTimer returns a new Timer.
func (RealClock) NewTimer(d time.Duration) Timer {
return &realTimer{
timer: time.NewTimer(d),
}
}
// NewTicker returns a new Ticker.
func (RealClock) NewTicker(d time.Duration) Ticker {
return &realTicker{
ticker: time.NewTicker(d),
}
}
// Sleep pauses the RealClock for duration d.
func (RealClock) Sleep(d time.Duration) {
time.Sleep(d)
}
// FakePassiveClock implements PassiveClock, but returns an arbitrary time.
type FakePassiveClock struct {
lock sync.RWMutex
time time.Time
}
// FakeClock implements Clock, but returns an arbitrary time.
type FakeClock struct {
FakePassiveClock
// waiters are waiting for the fake time to pass their specified time
waiters []fakeClockWaiter
}
type fakeClockWaiter struct {
targetTime time.Time
stepInterval time.Duration
skipIfBlocked bool
destChan chan time.Time
afterFunc func()
}
// NewFakePassiveClock returns a new FakePassiveClock.
func NewFakePassiveClock(t time.Time) *FakePassiveClock {
return &FakePassiveClock{
time: t,
}
}
// NewFakeClock returns a new FakeClock
func NewFakeClock(t time.Time) *FakeClock {
return &FakeClock{
FakePassiveClock: *NewFakePassiveClock(t),
}
}
// Now returns f's time.
func (f *FakePassiveClock) Now() time.Time {
f.lock.RLock()
defer f.lock.RUnlock()
return f.time
}
// Since returns time since the time in f.
func (f *FakePassiveClock) Since(ts time.Time) time.Duration {
f.lock.RLock()
defer f.lock.RUnlock()
return f.time.Sub(ts)
}
// SetTime sets the time on the FakePassiveClock.
func (f *FakePassiveClock) SetTime(t time.Time) {
f.lock.Lock()
defer f.lock.Unlock()
f.time = t
}
// After is the Fake version of time.After(d).
func (f *FakeClock) After(d time.Duration) <-chan time.Time {
f.lock.Lock()
defer f.lock.Unlock()
stopTime := f.time.Add(d)
ch := make(chan time.Time, 1) // Don't block!
f.waiters = append(f.waiters, fakeClockWaiter{
targetTime: stopTime,
destChan: ch,
})
return ch
}
// AfterFunc is the Fake version of time.AfterFunc(d, callback).
func (f *FakeClock) AfterFunc(d time.Duration, cb func()) Timer {
f.lock.Lock()
defer f.lock.Unlock()
stopTime := f.time.Add(d)
ch := make(chan time.Time, 1) // Don't block!
timer := &fakeTimer{
fakeClock: f,
waiter: fakeClockWaiter{
targetTime: stopTime,
destChan: ch,
afterFunc: cb,
},
}
f.waiters = append(f.waiters, timer.waiter)
return timer
}
// NewTimer is the Fake version of time.NewTimer(d).
func (f *FakeClock) NewTimer(d time.Duration) Timer {
f.lock.Lock()
defer f.lock.Unlock()
stopTime := f.time.Add(d)
ch := make(chan time.Time, 1) // Don't block!
timer := &fakeTimer{
fakeClock: f,
waiter: fakeClockWaiter{
targetTime: stopTime,
destChan: ch,
},
}
f.waiters = append(f.waiters, timer.waiter)
return timer
}
// NewTicker returns a new Ticker.
func (f *FakeClock) NewTicker(d time.Duration) Ticker {
f.lock.Lock()
defer f.lock.Unlock()
tickTime := f.time.Add(d)
ch := make(chan time.Time, 1) // hold one tick
f.waiters = append(f.waiters, fakeClockWaiter{
targetTime: tickTime,
stepInterval: d,
skipIfBlocked: true,
destChan: ch,
})
return &fakeTicker{
c: ch,
}
}
// Step moves clock by Duration, notifies anyone that's called After, Tick, or NewTimer
func (f *FakeClock) Step(d time.Duration) {
f.lock.Lock()
defer f.lock.Unlock()
f.setTimeLocked(f.time.Add(d))
}
// SetTime sets the time on a FakeClock.
func (f *FakeClock) SetTime(t time.Time) {
f.lock.Lock()
defer f.lock.Unlock()
f.setTimeLocked(t)
}
// Actually changes the time and checks any waiters. f must be write-locked.
func (f *FakeClock) setTimeLocked(t time.Time) {
f.time = t
newWaiters := make([]fakeClockWaiter, 0, len(f.waiters))
for i := range f.waiters {
w := &f.waiters[i]
if !w.targetTime.After(t) {
if w.skipIfBlocked {
select {
case w.destChan <- t:
default:
}
} else {
w.destChan <- t
}
if w.afterFunc != nil {
w.afterFunc()
}
if w.stepInterval > 0 {
for !w.targetTime.After(t) {
w.targetTime = w.targetTime.Add(w.stepInterval)
}
newWaiters = append(newWaiters, *w)
}
} else {
newWaiters = append(newWaiters, f.waiters[i])
}
}
f.waiters = newWaiters
}
// HasWaiters returns true if After or AfterFunc has been called on f but not yet satisfied
// (so you can write race-free tests).
func (f *FakeClock) HasWaiters() bool {
f.lock.RLock()
defer f.lock.RUnlock()
return len(f.waiters) > 0
}
// Sleep pauses the FakeClock for duration d.
func (f *FakeClock) Sleep(d time.Duration) {
f.Step(d)
}
// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration
type IntervalClock struct {
Time time.Time
Duration time.Duration
}
// Now returns i's time.
func (i *IntervalClock) Now() time.Time {
i.Time = i.Time.Add(i.Duration)
return i.Time
}
// Since returns time since the time in i.
func (i *IntervalClock) Since(ts time.Time) time.Duration {
return i.Time.Sub(ts)
}
// After is currently unimplemented, will panic.
// TODO: make interval clock use FakeClock so this can be implemented.
func (*IntervalClock) After(d time.Duration) <-chan time.Time {
panic("IntervalClock doesn't implement After")
}
// AfterFunc is currently unimplemented, will panic.
// TODO: make interval clock use FakeClock so this can be implemented.
func (*IntervalClock) AfterFunc(d time.Duration, cb func()) Timer {
panic("IntervalClock doesn't implement AfterFunc")
}
// NewTimer is currently unimplemented, will panic.
// TODO: make interval clock use FakeClock so this can be implemented.
func (*IntervalClock) NewTimer(d time.Duration) Timer {
panic("IntervalClock doesn't implement NewTimer")
}
// NewTicker is currently unimplemented, will panic.
// TODO: make interval clock use FakeClock so this can be implemented.
func (*IntervalClock) NewTicker(d time.Duration) Ticker {
panic("IntervalClock doesn't implement NewTicker")
}
// Sleep is currently unimplemented; will panic.
func (*IntervalClock) Sleep(d time.Duration) {
panic("IntervalClock doesn't implement Sleep")
}
// Timer allows for injecting fake or real timers into code that
// needs to do arbitrary things based on time.
type Timer interface {
C() <-chan time.Time
Stop() bool
Reset(d time.Duration) bool
}
// realTimer is backed by an actual time.Timer.
type realTimer struct {
timer *time.Timer
}
// C returns the underlying timer's channel.
func (r *realTimer) C() <-chan time.Time {
return r.timer.C
}
// Stop calls Stop() on the underlying timer.
func (r *realTimer) Stop() bool {
return r.timer.Stop()
}
// Reset calls Reset() on the underlying timer.
func (r *realTimer) Reset(d time.Duration) bool {
return r.timer.Reset(d)
}
// fakeTimer implements Timer based on a FakeClock.
type fakeTimer struct {
fakeClock *FakeClock
waiter fakeClockWaiter
}
// C returns the channel that notifies when this timer has fired.
func (f *fakeTimer) C() <-chan time.Time {
return f.waiter.destChan
}
// Stop conditionally stops the timer. If the timer has neither fired
// nor been stopped then this call stops the timer and returns true,
// otherwise this call returns false. This is like time.Timer::Stop.
func (f *fakeTimer) Stop() bool {
f.fakeClock.lock.Lock()
defer f.fakeClock.lock.Unlock()
// The timer has already fired or been stopped, unless it is found
// among the clock's waiters.
stopped := false
oldWaiters := f.fakeClock.waiters
newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters))
seekChan := f.waiter.destChan
for i := range oldWaiters {
// Identify the timer's fakeClockWaiter by the identity of the
// destination channel, nothing else is necessarily unique and
// constant since the timer's creation.
if oldWaiters[i].destChan == seekChan {
stopped = true
} else {
newWaiters = append(newWaiters, oldWaiters[i])
}
}
f.fakeClock.waiters = newWaiters
return stopped
}
// Reset conditionally updates the firing time of the timer. If the
// timer has neither fired nor been stopped then this call resets the
// timer to the fake clock's "now" + d and returns true, otherwise
// it creates a new waiter, adds it to the clock, and returns true.
//
// It is not possible to return false, because a fake timer can be reset
// from any state (waiting to fire, already fired, and stopped).
//
// See the GoDoc for time.Timer::Reset for more context on why
// the return value of Reset() is not useful.
func (f *fakeTimer) Reset(d time.Duration) bool {
f.fakeClock.lock.Lock()
defer f.fakeClock.lock.Unlock()
waiters := f.fakeClock.waiters
seekChan := f.waiter.destChan
for i := range waiters {
if waiters[i].destChan == seekChan {
waiters[i].targetTime = f.fakeClock.time.Add(d)
return true
}
}
// No existing waiter, timer has already fired or been reset.
// We should still enable Reset() to succeed by creating a
// new waiter and adding it to the clock's waiters.
newWaiter := fakeClockWaiter{
targetTime: f.fakeClock.time.Add(d),
destChan: seekChan,
}
f.fakeClock.waiters = append(f.fakeClock.waiters, newWaiter)
return true
}
// Ticker defines the Ticker interface
type Ticker interface {
C() <-chan time.Time
Stop()
}
type realTicker struct {
ticker *time.Ticker
}
func (t *realTicker) C() <-chan time.Time {
return t.ticker.C
}
func (t *realTicker) Stop() {
t.ticker.Stop()
}
type fakeTicker struct {
c <-chan time.Time
}
func (t *fakeTicker) C() <-chan time.Time {
return t.c
}
func (t *fakeTicker) Stop() {
}
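
Note: this file is deleted because the clock abstractions now come from k8s.io/utils/clock; the fake and interval clocks used in tests appear to live in its testing subpackage. A hedged migration sketch (clocktesting is an import alias chosen here, not an upstream name):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/utils/clock"
	clocktesting "k8s.io/utils/clock/testing"
)

func main() {
	// Was: utilclock "k8s.io/apimachinery/pkg/util/clock"; utilclock.RealClock{}.
	var real clock.Clock = clock.RealClock{}
	fmt.Println(real.Now())

	// Was: utilclock.NewFakeClock(...).
	fake := clocktesting.NewFakeClock(time.Unix(0, 0).UTC())
	fake.Step(5 * time.Second)
	fmt.Println(fake.Now()) // advanced by the fake 5s step
}
```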

View File

@@ -132,14 +132,14 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) {
// Return whatever remaining data exists from an in progress frame
if n := len(r.remaining); n > 0 {
if n <= len(data) {
//lint:ignore SA4006,SA4010 underlying array of data is modified here.
//nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here.
data = append(data[0:0], r.remaining...)
r.remaining = nil
return n, nil
}
n = len(data)
//lint:ignore SA4006,SA4010 underlying array of data is modified here.
//nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here.
data = append(data[0:0], r.remaining[:n]...)
r.remaining = r.remaining[n:]
return n, io.ErrShortBuffer
@@ -157,7 +157,7 @@ func (r *jsonFrameReader) Read(data []byte) (int, error) {
// and set m to it, which means we need to copy the partial result back into data and preserve
// the remaining result for subsequent reads.
if len(m) > n {
//lint:ignore SA4006,SA4010 underlying array of data is modified here.
//nolint:staticcheck // SA4006,SA4010 underlying array of data is modified here.
data = append(data[0:0], m[:n]...)
r.remaining = m[n:]
return n, io.ErrShortBuffer

View File

@@ -1,3 +1,4 @@
//go:build !notest
// +build !notest
/*

View File

@@ -17,10 +17,11 @@ limitations under the License.
package json
import (
"bytes"
"encoding/json"
"fmt"
"io"
kjson "sigs.k8s.io/json"
)
// NewEncoder delegates to json.NewEncoder
@@ -38,50 +39,11 @@ func Marshal(v interface{}) ([]byte, error) {
// limit recursive depth to prevent stack overflow errors
const maxDepth = 10000
// Unmarshal unmarshals the given data
// If v is a *map[string]interface{}, *[]interface{}, or *interface{} numbers
// are converted to int64 or float64
// Unmarshal unmarshals the given data.
// Object keys are case-sensitive.
// Numbers decoded into interface{} fields are converted to int64 or float64.
func Unmarshal(data []byte, v interface{}) error {
switch v := v.(type) {
case *map[string]interface{}:
// Build a decoder from the given data
decoder := json.NewDecoder(bytes.NewBuffer(data))
// Preserve numbers, rather than casting to float64 automatically
decoder.UseNumber()
// Run the decode
if err := decoder.Decode(v); err != nil {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return ConvertMapNumbers(*v, 0)
case *[]interface{}:
// Build a decoder from the given data
decoder := json.NewDecoder(bytes.NewBuffer(data))
// Preserve numbers, rather than casting to float64 automatically
decoder.UseNumber()
// Run the decode
if err := decoder.Decode(v); err != nil {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return ConvertSliceNumbers(*v, 0)
case *interface{}:
// Build a decoder from the given data
decoder := json.NewDecoder(bytes.NewBuffer(data))
// Preserve numbers, rather than casting to float64 automatically
decoder.UseNumber()
// Run the decode
if err := decoder.Decode(v); err != nil {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return ConvertInterfaceNumbers(v, 0)
default:
return json.Unmarshal(data, v)
}
return kjson.UnmarshalCaseSensitivePreserveInts(data, v)
}
// ConvertInterfaceNumbers converts any json.Number values to int64 or float64.
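
Note: Unmarshal now delegates to sigs.k8s.io/json, which the new doc comment describes as case-sensitive for object keys and integer-preserving for interface{} targets. A minimal sketch of the integer-preserving part:

```go
package main

import (
	"fmt"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	var out map[string]interface{}
	// Integers decoded into interface{} stay int64 rather than float64.
	if err := utiljson.Unmarshal([]byte(`{"replicas": 3}`), &out); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", out["replicas"]) // int64
}
```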

View File

@@ -35,7 +35,22 @@ import (
// that no managed fields were found for the fieldManager because other field managers
// have taken ownership of all the fields previously owned by the fieldManager. It is
// also possible the fieldManager never owned fields.
func ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldManager string, applyConfiguration interface{}) error {
//
The provided object MUST be a root resource object since subresource objects
// do not contain their own managed fields. For example, an autoscaling.Scale
// object read from a "scale" subresource does not have any managed fields and so
// cannot be used as the object.
//
// If the fields of a subresource are a subset of the fields of the root object,
// and their field paths and types are exactly the same, then ExtractInto can be
// called with the root resource as the object and the subresource as the
// applyConfiguration. This works for "status", obviously, because status is
represented by the exact same object as the root resource. This does NOT
// work, for example, with the "scale" subresources of Deployment, ReplicaSet and
// StatefulSet. While the spec.replicas, status.replicas fields are in the same
// exact field path locations as they are in autoscaling.Scale, the selector
// fields are in different locations, and are a different type.
func ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldManager string, applyConfiguration interface{}, subresource string) error {
typedObj, err := toTyped(object, objectType)
if err != nil {
return fmt.Errorf("error converting obj to typed: %w", err)
@@ -45,7 +60,7 @@ func ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldMan
if err != nil {
return fmt.Errorf("error accessing metadata: %w", err)
}
fieldsEntry, ok := findManagedFields(accessor, fieldManager)
fieldsEntry, ok := findManagedFields(accessor, fieldManager, subresource)
if !ok {
return nil
}
@@ -60,16 +75,23 @@ func ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldMan
if !ok {
return fmt.Errorf("unable to convert managed fields for %s to unstructured, expected map, got %T", fieldManager, u)
}
// set the type meta manually if it doesn't exist to avoid missing kind errors
// when decoding from unstructured JSON
if _, ok := m["kind"]; !ok && object.GetObjectKind().GroupVersionKind().Kind != "" {
m["kind"] = object.GetObjectKind().GroupVersionKind().Kind
m["apiVersion"] = object.GetObjectKind().GroupVersionKind().GroupVersion().String()
}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, applyConfiguration); err != nil {
return fmt.Errorf("error extracting into obj from unstructured: %w", err)
}
return nil
}
func findManagedFields(accessor metav1.Object, fieldManager string) (metav1.ManagedFieldsEntry, bool) {
func findManagedFields(accessor metav1.Object, fieldManager string, subresource string) (metav1.ManagedFieldsEntry, bool) {
objManagedFields := accessor.GetManagedFields()
for _, mf := range objManagedFields {
if mf.Manager == fieldManager && mf.Operation == metav1.ManagedFieldsOperationApply {
if mf.Manager == fieldManager && mf.Operation == metav1.ManagedFieldsOperationApply && mf.Subresource == subresource {
return mf, true
}
}
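
Note: ExtractInto and findManagedFields gain a subresource parameter so that managed-fields entries written through a subresource (e.g. "status") can be matched. A compile-only sketch of the new call shape; extractStatus and "my-field-manager" are hypothetical names used only for illustration:

```go
package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/managedfields"
	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

// extractStatus shows the extra argument: "" selects the main resource,
// "status" selects fields applied through the status subresource.
func extractStatus(obj runtime.Object, objType typed.ParseableType, into interface{}) error {
	return managedfields.ExtractInto(obj, objType, "my-field-manager", into, "status")
}

func main() {}
```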

View File

@@ -0,0 +1,127 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package managedfields
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kube-openapi/pkg/schemaconv"
"k8s.io/kube-openapi/pkg/util/proto"
"sigs.k8s.io/structured-merge-diff/v4/typed"
)
// groupVersionKindExtensionKey is the key used to lookup the
// GroupVersionKind value for an object definition from the
// definition's "extensions" map.
const groupVersionKindExtensionKey = "x-kubernetes-group-version-kind"
// GvkParser contains a Parser that allows introspecting the schema.
type GvkParser struct {
gvks map[schema.GroupVersionKind]string
parser typed.Parser
}
// Type returns a helper which can produce objects of the given type. Any
// errors are deferred until a further function is called.
func (p *GvkParser) Type(gvk schema.GroupVersionKind) *typed.ParseableType {
typeName, ok := p.gvks[gvk]
if !ok {
return nil
}
t := p.parser.Type(typeName)
return &t
}
// NewGVKParser builds a GVKParser from a proto.Models. This
// will automatically find the proper version of the object, and the
// corresponding schema information.
func NewGVKParser(models proto.Models, preserveUnknownFields bool) (*GvkParser, error) {
typeSchema, err := schemaconv.ToSchemaWithPreserveUnknownFields(models, preserveUnknownFields)
if err != nil {
return nil, fmt.Errorf("failed to convert models to schema: %v", err)
}
parser := GvkParser{
gvks: map[schema.GroupVersionKind]string{},
}
parser.parser = typed.Parser{Schema: *typeSchema}
for _, modelName := range models.ListModels() {
model := models.LookupModel(modelName)
if model == nil {
panic(fmt.Sprintf("ListModels returns a model that can't be looked-up for: %v", modelName))
}
gvkList := parseGroupVersionKind(model)
for _, gvk := range gvkList {
if len(gvk.Kind) > 0 {
_, ok := parser.gvks[gvk]
if ok {
return nil, fmt.Errorf("duplicate entry for %v", gvk)
}
parser.gvks[gvk] = modelName
}
}
}
return &parser, nil
}
// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one.
func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind {
extensions := s.GetExtensions()
gvkListResult := []schema.GroupVersionKind{}
// Get the extensions
gvkExtension, ok := extensions[groupVersionKindExtensionKey]
if !ok {
return []schema.GroupVersionKind{}
}
// gvk extension must be a list of at least 1 element.
gvkList, ok := gvkExtension.([]interface{})
if !ok {
return []schema.GroupVersionKind{}
}
for _, gvk := range gvkList {
// gvk extension list must be a map with group, version, and
// kind fields
gvkMap, ok := gvk.(map[interface{}]interface{})
if !ok {
continue
}
group, ok := gvkMap["group"].(string)
if !ok {
continue
}
version, ok := gvkMap["version"].(string)
if !ok {
continue
}
kind, ok := gvkMap["kind"].(string)
if !ok {
continue
}
gvkListResult = append(gvkListResult, schema.GroupVersionKind{
Group: group,
Version: version,
Kind: kind,
})
}
return gvkListResult
}
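
Note: the new GvkParser maps group/version/kind to schema type names so callers can obtain a typed.ParseableType for a given GVK. A compile-only sketch; deploymentType is a hypothetical helper and the models would normally come from a cluster's OpenAPI document:

```go
package main

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/managedfields"
	"k8s.io/kube-openapi/pkg/util/proto"
	"sigs.k8s.io/structured-merge-diff/v4/typed"
)

// deploymentType looks up the schema type for apps/v1 Deployment.
// A nil result means the GVK is not present in the supplied models.
func deploymentType(models proto.Models) (*typed.ParseableType, error) {
	parser, err := managedfields.NewGVKParser(models, false)
	if err != nil {
		return nil, err
	}
	gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
	return parser.Type(gvk), nil
}

func main() {}
```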

View File

@@ -39,6 +39,7 @@ import (
"golang.org/x/net/http2"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
)
// JoinPreservingTrailingSlash does a path.Join of the specified elements,
@@ -113,6 +114,7 @@ func SetOldTransportDefaults(t *http.Transport) *http.Transport {
t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
}
// If no custom dialer is set, use the default context dialer
//lint:file-ignore SA1019 Keep supporting deprecated Dial method of custom transports
if t.DialContext == nil && t.Dial == nil {
t.DialContext = defaultTransport.DialContext
}
@@ -236,6 +238,29 @@ func DialerFor(transport http.RoundTripper) (DialFunc, error) {
}
}
// CloseIdleConnectionsFor closes idle connections for the Transport.
// If the Transport is wrapped it iterates over the wrapped round trippers
// until it finds one that implements the CloseIdleConnections method.
// If the Transport does not have a CloseIdleConnections method
// then this function does nothing.
func CloseIdleConnectionsFor(transport http.RoundTripper) {
if transport == nil {
return
}
type closeIdler interface {
CloseIdleConnections()
}
switch transport := transport.(type) {
case closeIdler:
transport.CloseIdleConnections()
case RoundTripperWrapper:
CloseIdleConnectionsFor(transport.WrappedRoundTripper())
default:
klog.Warningf("unknown transport type: %T", transport)
}
}
type TLSClientConfigHolder interface {
TLSClientConfig() *tls.Config
}
@@ -288,7 +313,7 @@ func SourceIPs(req *http.Request) []net.IP {
// Use the first valid one.
parts := strings.Split(hdrForwardedFor, ",")
for _, part := range parts {
ip := net.ParseIP(strings.TrimSpace(part))
ip := netutils.ParseIPSloppy(strings.TrimSpace(part))
if ip != nil {
srcIPs = append(srcIPs, ip)
}
@@ -298,7 +323,7 @@ func SourceIPs(req *http.Request) []net.IP {
// Try the X-Real-Ip header.
hdrRealIp := hdr.Get("X-Real-Ip")
if hdrRealIp != "" {
ip := net.ParseIP(hdrRealIp)
ip := netutils.ParseIPSloppy(hdrRealIp)
// Only append the X-Real-Ip if it's not already contained in the X-Forwarded-For chain.
if ip != nil && !containsIP(srcIPs, ip) {
srcIPs = append(srcIPs, ip)
@@ -310,11 +335,11 @@ func SourceIPs(req *http.Request) []net.IP {
// Remote Address in Go's HTTP server is in the form host:port so we need to split that first.
host, _, err := net.SplitHostPort(req.RemoteAddr)
if err == nil {
remoteIP = net.ParseIP(host)
remoteIP = netutils.ParseIPSloppy(host)
}
// Fallback if Remote Address was just IP.
if remoteIP == nil {
remoteIP = net.ParseIP(req.RemoteAddr)
remoteIP = netutils.ParseIPSloppy(req.RemoteAddr)
}
// Don't duplicate remote IP if it's already the last address in the chain.
@@ -381,7 +406,7 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
cidrs := []*net.IPNet{}
for _, noProxyRule := range noProxyRules {
_, cidr, _ := net.ParseCIDR(noProxyRule)
_, cidr, _ := netutils.ParseCIDRSloppy(noProxyRule)
if cidr != nil {
cidrs = append(cidrs, cidr)
}
@@ -392,7 +417,7 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
}
return func(req *http.Request) (*url.URL, error) {
ip := net.ParseIP(req.URL.Hostname())
ip := netutils.ParseIPSloppy(req.URL.Hostname())
if ip == nil {
return delegate(req)
}
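
Note: besides switching to the leading-zero-tolerant parsers from k8s.io/utils/net, this file gains CloseIdleConnectionsFor, which unwraps RoundTripperWrapper chains until it finds a transport with a CloseIdleConnections method. A minimal sketch of the call:

```go
package main

import (
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	client := &http.Client{Transport: http.DefaultTransport}

	// ... use the client ...

	// Handles plain transports and wrappers implementing utilnet.RoundTripperWrapper;
	// unknown transport types only produce a warning log.
	utilnet.CloseIdleConnectionsFor(client.Transport)
}
```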

View File

@@ -27,6 +27,7 @@ import (
"strings"
"k8s.io/klog/v2"
netutils "k8s.io/utils/net"
)
type AddressFamily uint
@@ -221,7 +222,7 @@ func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error)
if len(addrs) > 0 {
for i := range addrs {
klog.V(4).Infof("Checking addr %s.", addrs[i].String())
ip, _, err := net.ParseCIDR(addrs[i].String())
ip, _, err := netutils.ParseCIDRSloppy(addrs[i].String())
if err != nil {
return nil, err
}
@@ -336,7 +337,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFam
continue
}
for _, addr := range addrs {
ip, _, err := net.ParseCIDR(addr.String())
ip, _, err := netutils.ParseCIDRSloppy(addr.String())
if err != nil {
return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err)
}

View File

@@ -31,22 +31,22 @@ type PatchMeta struct {
patchMergeKey string
}
func (pm PatchMeta) GetPatchStrategies() []string {
func (pm *PatchMeta) GetPatchStrategies() []string {
if pm.patchStrategies == nil {
return []string{}
}
return pm.patchStrategies
}
func (pm PatchMeta) SetPatchStrategies(ps []string) {
func (pm *PatchMeta) SetPatchStrategies(ps []string) {
pm.patchStrategies = ps
}
func (pm PatchMeta) GetPatchMergeKey() string {
func (pm *PatchMeta) GetPatchMergeKey() string {
return pm.patchMergeKey
}
func (pm PatchMeta) SetPatchMergeKey(pmk string) {
func (pm *PatchMeta) SetPatchMergeKey(pmk string) {
pm.patchMergeKey = pmk
}

View File

@@ -987,10 +987,10 @@ func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey
return nil
}
var nonDeleteList, toDeleteList []interface{}
var nonDeleteList []interface{}
var err error
if len(mergeKey) > 0 {
nonDeleteList, toDeleteList, err = extractToDeleteItems(typedPatchList)
nonDeleteList, _, err = extractToDeleteItems(typedPatchList)
if err != nil {
return err
}
@@ -1018,7 +1018,6 @@ func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey
if patchIndex < len(nonDeleteList) && setOrderIndex >= len(typedSetOrderList) {
return fmt.Errorf("The order in patch list:\n%v\n doesn't match %s list:\n%v\n", typedPatchList, setElementOrderDirectivePrefix, setOrderList)
}
typedPatchList = append(nonDeleteList, toDeleteList...)
return nil
}
@@ -1329,15 +1328,19 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me
_, ok := original[k]
if !ok {
// If it's not in the original document, just take the patch value.
original[k] = patchV
if !isDeleteList {
// If it's not in the original document, just take the patch value.
original[k] = patchV
}
continue
}
originalType := reflect.TypeOf(original[k])
patchType := reflect.TypeOf(patchV)
if originalType != patchType {
original[k] = patchV
if !isDeleteList {
original[k] = patchV
}
continue
}
// If they're both maps or lists, recurse into the value.

View File

@@ -181,7 +181,7 @@ func Invalid(field *Path, value interface{}, detail string) *Error {
// valid values).
func NotSupported(field *Path, value interface{}, validValues []string) *Error {
detail := ""
if validValues != nil && len(validValues) > 0 {
if len(validValues) > 0 {
quotedValues := make([]string, len(validValues))
for i, v := range validValues {
quotedValues[i] = strconv.Quote(v)
@@ -239,6 +239,9 @@ func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher {
// ToAggregate converts the ErrorList into an errors.Aggregate.
func (list ErrorList) ToAggregate() utilerrors.Aggregate {
if len(list) == 0 {
return nil
}
errs := make([]error, 0, len(list))
errorMsgs := sets.NewString()
for _, err := range list {
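
Note: ToAggregate now returns nil for an empty ErrorList, so validation call sites can return the aggregate unconditionally. A small sketch:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	var errs field.ErrorList

	// An empty list now aggregates to nil, so no explicit len(errs) check is needed.
	if errs.ToAggregate() == nil {
		fmt.Println("no validation errors")
	}

	errs = append(errs, field.Required(field.NewPath("spec", "replicas"), ""))
	fmt.Println(errs.ToAggregate()) // spec.replicas: Required value
}
```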

View File

@@ -25,6 +25,7 @@ import (
"strings"
"k8s.io/apimachinery/pkg/util/validation/field"
netutils "k8s.io/utils/net"
)
const qnameCharFmt string = "[A-Za-z0-9]"
@@ -346,7 +347,7 @@ func IsValidPortName(port string) []string {
// IsValidIP tests that the argument is a valid IP address.
func IsValidIP(value string) []string {
if net.ParseIP(value) == nil {
if netutils.ParseIPSloppy(value) == nil {
return []string{"must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)"}
}
return nil
@@ -355,7 +356,7 @@ func IsValidIP(value string) []string {
// IsValidIPv4Address tests that the argument is a valid IPv4 address.
func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList {
var allErrors field.ErrorList
ip := net.ParseIP(value)
ip := netutils.ParseIPSloppy(value)
if ip == nil || ip.To4() == nil {
allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address"))
}
@@ -365,7 +366,7 @@ func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList {
// IsValidIPv6Address tests that the argument is a valid IPv6 address.
func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList {
var allErrors field.ErrorList
ip := net.ParseIP(value)
ip := netutils.ParseIPSloppy(value)
if ip == nil || ip.To4() != nil {
allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address"))
}
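
Note: net.ParseIP became stricter in Go 1.17 and rejects leading zeros in dotted-decimal addresses; ParseIPSloppy from k8s.io/utils/net keeps the old lenient behaviour so previously accepted API values still validate. A tiny sketch:

```go
package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

func main() {
	// The standard library's net.ParseIP returns nil for this input on Go 1.17+;
	// ParseIPSloppy still parses the octets as decimal.
	fmt.Println(netutils.ParseIPSloppy("010.009.008.007")) // 10.9.8.7
}
```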

View File

@@ -24,8 +24,8 @@ import (
"sync"
"time"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/utils/clock"
)
// For any test of the style:
@@ -166,6 +166,9 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan
// of every loop to prevent extra executions of f().
select {
case <-stopCh:
if !t.Stop() {
<-t.C()
}
return
case <-t.C():
}
@@ -205,10 +208,29 @@ var ErrWaitTimeout = errors.New("timed out waiting for the condition")
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)
// ConditionWithContextFunc returns true if the condition is satisfied, or an error
// if the loop should be aborted.
//
// The caller passes along a context that can be used by the condition function.
type ConditionWithContextFunc func(context.Context) (done bool, err error)
// WithContext converts a ConditionFunc into a ConditionWithContextFunc
func (cf ConditionFunc) WithContext() ConditionWithContextFunc {
return func(context.Context) (done bool, err error) {
return cf()
}
}
// runConditionWithCrashProtection runs a ConditionFunc with crash protection
func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
return runConditionWithCrashProtectionWithContext(context.TODO(), condition.WithContext())
}
// runConditionWithCrashProtectionWithContext runs a
// ConditionWithContextFunc with crash protection.
func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) {
defer runtime.HandleCrash()
return condition()
return condition(ctx)
}
// Backoff holds parameters applied to a Backoff function.
@@ -418,13 +440,62 @@ func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
//
// If you want to Poll something forever, see PollInfinite.
func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
return pollInternal(poller(interval, timeout), condition)
return PollWithContext(context.Background(), interval, timeout, condition.WithContext())
}
func pollInternal(wait WaitFunc, condition ConditionFunc) error {
done := make(chan struct{})
defer close(done)
return WaitFor(wait, condition, done)
// PollWithContext tries a condition func until it returns true, an error,
// or when the context expires or the timeout is reached, whichever
// happens first.
//
// PollWithContext always waits the interval before the run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, false, poller(interval, timeout), condition)
}
// PollUntil tries a condition func until it returns true, an error or stopCh is
// closed.
//
// PollUntil always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
ctx, cancel := contextForChannel(stopCh)
defer cancel()
return PollUntilWithContext(ctx, interval, condition.WithContext())
}
// PollUntilWithContext tries a condition func until it returns true,
// an error or the specified context is cancelled or expired.
//
// PollUntilWithContext always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, false, poller(interval, 0), condition)
}
// PollInfinite tries a condition func until it returns true or an error
//
// PollInfinite always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollInfinite(interval time.Duration, condition ConditionFunc) error {
return PollInfiniteWithContext(context.Background(), interval, condition.WithContext())
}
// PollInfiniteWithContext tries a condition func until it returns true or an error
//
// PollInfiniteWithContext always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, false, poller(interval, 0), condition)
}
// PollImmediate tries a condition func until it returns true, an error, or the timeout
@@ -438,30 +509,40 @@ func pollInternal(wait WaitFunc, condition ConditionFunc) error {
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
return pollImmediateInternal(poller(interval, timeout), condition)
return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext())
}
func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error {
done, err := runConditionWithCrashProtection(condition)
if err != nil {
return err
}
if done {
return nil
}
return pollInternal(wait, condition)
}
// PollInfinite tries a condition func until it returns true or an error
// PollImmediateWithContext tries a condition func until it returns true, an error,
// or the timeout is reached or the specified context expires, whichever happens first.
//
// PollInfinite always waits the interval before the run of 'condition'.
// PollImmediateWithContext always checks 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollInfinite(interval time.Duration, condition ConditionFunc) error {
done := make(chan struct{})
defer close(done)
return PollUntil(interval, condition, done)
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, true, poller(interval, timeout), condition)
}
// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
//
// PollImmediateUntil runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
ctx, cancel := contextForChannel(stopCh)
defer cancel()
return PollImmediateUntilWithContext(ctx, interval, condition.WithContext())
}
// PollImmediateUntilWithContext tries a condition func until it returns true,
// an error or the specified context is cancelled or expired.
//
// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, true, poller(interval, 0), condition)
}
// PollImmediateInfinite tries a condition func until it returns true or an error
@@ -471,44 +552,46 @@ func PollInfinite(interval time.Duration, condition ConditionFunc) error {
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
done, err := runConditionWithCrashProtection(condition)
if err != nil {
return err
}
if done {
return nil
}
return PollInfinite(interval, condition)
return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext())
}
// PollUntil tries a condition func until it returns true, an error or stopCh is
// closed.
// PollImmediateInfiniteWithContext tries a condition func until it returns true
// or an error or the specified context gets cancelled or expired.
//
// PollUntil always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
ctx, cancel := contextForChannel(stopCh)
defer cancel()
return WaitFor(poller(interval, 0), condition, ctx.Done())
// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
return poll(ctx, true, poller(interval, 0), condition)
}
// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
//
// PollImmediateUntil runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
done, err := condition()
if err != nil {
return err
}
if done {
return nil
// Internally used, each of the public 'Poll*' functions defined in this
// package should invoke this internal function with appropriate parameters.
// ctx: the context specified by the caller, for infinite polling pass
// a context that never gets cancelled or expired.
// immediate: if true, the 'condition' will be invoked before waiting for the interval,
// in this case 'condition' will always be invoked at least once.
// wait: user specified WaitFunc function that controls at what interval the condition
// function should be invoked periodically and whether it is bound by a timeout.
// condition: user specified ConditionWithContextFunc function.
func poll(ctx context.Context, immediate bool, wait WaitWithContextFunc, condition ConditionWithContextFunc) error {
if immediate {
done, err := runConditionWithCrashProtectionWithContext(ctx, condition)
if err != nil {
return err
}
if done {
return nil
}
}
select {
case <-stopCh:
case <-ctx.Done():
// returning ctx.Err() will break backward compatibility
return ErrWaitTimeout
default:
return PollUntil(interval, condition, stopCh)
return WaitForWithContext(ctx, wait, condition)
}
}
@@ -516,6 +599,20 @@ func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh
// should be executed and is closed when the last test should be invoked.
type WaitFunc func(done <-chan struct{}) <-chan struct{}
// WithContext converts the WaitFunc to an equivalent WaitWithContextFunc
func (w WaitFunc) WithContext() WaitWithContextFunc {
return func(ctx context.Context) <-chan struct{} {
return w(ctx.Done())
}
}
// WaitWithContextFunc creates a channel that receives an item every time a test
// should be executed and is closed when the last test should be invoked.
//
// When the specified context gets cancelled or expires the function
// stops sending item and returns immediately.
type WaitWithContextFunc func(ctx context.Context) <-chan struct{}
// WaitFor continually checks 'fn' as driven by 'wait'.
//
// WaitFor gets a channel from 'wait()', and then invokes 'fn' once for every value
@@ -532,13 +629,35 @@ type WaitFunc func(done <-chan struct{}) <-chan struct{}
// "uniform pseudo-random", the `fn` might still run one or multiple time,
// though eventually `WaitFor` will return.
func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
stopCh := make(chan struct{})
defer close(stopCh)
c := wait(stopCh)
ctx, cancel := contextForChannel(done)
defer cancel()
return WaitForWithContext(ctx, wait.WithContext(), fn.WithContext())
}
// WaitForWithContext continually checks 'fn' as driven by 'wait'.
//
// WaitForWithContext gets a channel from 'wait()', and then invokes 'fn'
// once for every value placed on the channel and once more when the
// channel is closed. If the channel is closed and 'fn'
// returns false without error, WaitForWithContext returns ErrWaitTimeout.
//
// If 'fn' returns an error the loop ends and that error is returned. If
// 'fn' returns true the loop ends and nil is returned.
//
// context.Canceled will be returned if the ctx.Done() channel is closed
// without fn ever returning true.
//
// When the ctx.Done() channel is closed, because the golang `select` statement is
// "uniform pseudo-random", the `fn` might still run one or multiple times,
// though eventually `WaitForWithContext` will return.
func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn ConditionWithContextFunc) error {
waitCtx, cancel := context.WithCancel(context.Background())
defer cancel()
c := wait(waitCtx)
for {
select {
case _, open := <-c:
ok, err := runConditionWithCrashProtection(fn)
ok, err := runConditionWithCrashProtectionWithContext(ctx, fn)
if err != nil {
return err
}
@@ -548,7 +667,8 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
if !open {
return ErrWaitTimeout
}
case <-done:
case <-ctx.Done():
// returning ctx.Err() will break backward compatibility
return ErrWaitTimeout
}
}
@@ -564,8 +684,8 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
//
// Output ticks are not buffered. If the channel is not ready to receive an
// item, the tick is skipped.
func poller(interval, timeout time.Duration) WaitFunc {
return WaitFunc(func(done <-chan struct{}) <-chan struct{} {
func poller(interval, timeout time.Duration) WaitWithContextFunc {
return WaitWithContextFunc(func(ctx context.Context) <-chan struct{} {
ch := make(chan struct{})
go func() {
@@ -595,7 +715,7 @@ func poller(interval, timeout time.Duration) WaitFunc {
}
case <-after:
return
case <-done:
case <-ctx.Done():
return
}
}
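
Note: the polling helpers are reworked around ConditionWithContextFunc and an internal poll(ctx, immediate, wait, condition); on cancellation they still return ErrWaitTimeout rather than ctx.Err() for backward compatibility. A short sketch using one of the new context-aware entry points:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	start := time.Now()
	err := wait.PollImmediateWithContext(ctx, 200*time.Millisecond, 2*time.Second,
		func(ctx context.Context) (bool, error) {
			// The condition now receives the caller's context.
			return time.Since(start) > time.Second, nil
		})
	fmt.Println(err) // nil once the condition has returned true
}
```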

View File

@@ -59,6 +59,34 @@ func Unmarshal(data []byte, v interface{}) error {
}
}
// UnmarshalStrict unmarshals the given data
// strictly (erroring when there are duplicate fields).
func UnmarshalStrict(data []byte, v interface{}) error {
preserveIntFloat := func(d *json.Decoder) *json.Decoder {
d.UseNumber()
return d
}
switch v := v.(type) {
case *map[string]interface{}:
if err := yaml.UnmarshalStrict(data, v, preserveIntFloat); err != nil {
return err
}
return jsonutil.ConvertMapNumbers(*v, 0)
case *[]interface{}:
if err := yaml.UnmarshalStrict(data, v, preserveIntFloat); err != nil {
return err
}
return jsonutil.ConvertSliceNumbers(*v, 0)
case *interface{}:
if err := yaml.UnmarshalStrict(data, v, preserveIntFloat); err != nil {
return err
}
return jsonutil.ConvertInterfaceNumbers(v, 0)
default:
return yaml.UnmarshalStrict(data, v)
}
}
// ToJSON converts a single YAML document into a JSON document
// or returns an error. If the document appears to be JSON the
// YAML decoding path is not used (so that error messages are
@@ -291,15 +319,19 @@ func (r *YAMLReader) Read() ([]byte, error) {
if i := bytes.Index(line, []byte(separator)); i == 0 {
// We have a potential document terminator
i += sep
after := line[i:]
if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 {
if buffer.Len() != 0 {
return buffer.Bytes(), nil
}
if err == io.EOF {
return nil, err
trimmed := strings.TrimSpace(string(line[i:]))
// We only allow comments and spaces following the yaml doc separator, otherwise we'll return an error
if len(trimmed) > 0 && string(trimmed[0]) != "#" {
return nil, YAMLSyntaxError{
err: fmt.Errorf("invalid Yaml document separator: %s", trimmed),
}
}
if buffer.Len() != 0 {
return buffer.Bytes(), nil
}
if err == io.EOF {
return nil, err
}
}
if err == io.EOF {
if buffer.Len() != 0 {