go.*: Update k8s packages
- update k8s client-go
- update k8s apiextensions-apiserver
- update k8s controller-tools

Signed-off-by: leonnicolas <leonloechner@gmx.de>
vendor/k8s.io/client-go/tools/cache/OWNERS | 2 (generated, vendored)

@@ -20,7 +20,6 @@ reviewers:
 - caesarxuchao
 - mikedanese
 - liggitt
-- erictune
 - davidopp
 - pmorie
 - janetkuo
@@ -31,7 +30,6 @@ reviewers:
 - hongchaodeng
 - krousey
 - xiang90
-- mml
 - ingvagabund
 - resouer
 - jessfraz
vendor/k8s.io/client-go/tools/cache/controller.go | 102 (generated, vendored)

@@ -21,9 +21,9 @@ import (
 	"time"
 
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/clock"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/utils/clock"
 )
 
 // This file implements a low-level controller that is used in
@@ -322,10 +322,10 @@ func NewInformer(
 	// This will hold the client state, as we know it.
 	clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
 
-	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
+	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
 }
 
-// NewIndexerInformer returns a Indexer and a controller for populating the index
+// NewIndexerInformer returns an Indexer and a Controller for populating the index
 // while also providing event notifications. You should only used the returned
 // Index for Get/List operations; Add/Modify/Deletes will cause the event
 // notifications to be faulty.
@@ -351,7 +351,59 @@ func NewIndexerInformer(
 	// This will hold the client state, as we know it.
 	clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
 
-	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
+	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
+}
+
+// TransformFunc allows for transforming an object before it will be processed
+// and put into the controller cache and before the corresponding handlers will
+// be called on it.
+// TransformFunc (similarly to ResourceEventHandler functions) should be able
+// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown
+//
+// The most common usage pattern is to clean-up some parts of the object to
+// reduce component memory usage if a given component doesn't care about them.
+// given controller doesn't care for them
+type TransformFunc func(interface{}) (interface{}, error)
+
+// NewTransformingInformer returns a Store and a controller for populating
+// the store while also providing event notifications. You should only used
+// the returned Store for Get/List operations; Add/Modify/Deletes will cause
+// the event notifications to be faulty.
+// The given transform function will be called on all objects before they will
+// put put into the Store and corresponding Add/Modify/Delete handlers will
+// be invokved for them.
+func NewTransformingInformer(
+	lw ListerWatcher,
+	objType runtime.Object,
+	resyncPeriod time.Duration,
+	h ResourceEventHandler,
+	transformer TransformFunc,
+) (Store, Controller) {
+	// This will hold the client state, as we know it.
+	clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+
+	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer)
+}
+
+// NewTransformingIndexerInformer returns an Indexer and a controller for
+// populating the index while also providing event notifications. You should
+// only used the returned Index for Get/List operations; Add/Modify/Deletes
+// will cause the event notifications to be faulty.
+// The given transform function will be called on all objects before they will
+// be put into the Index and corresponding Add/Modify/Delete handlers will
+// be invoked for them.
+func NewTransformingIndexerInformer(
+	lw ListerWatcher,
+	objType runtime.Object,
+	resyncPeriod time.Duration,
+	h ResourceEventHandler,
+	indexers Indexers,
+	transformer TransformFunc,
+) (Indexer, Controller) {
+	// This will hold the client state, as we know it.
+	clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
+
+	return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer)
 }
 
 // newInformer returns a controller for populating the store while also
@@ -374,6 +426,7 @@ func newInformer(
 	resyncPeriod time.Duration,
 	h ResourceEventHandler,
 	clientState Store,
+	transformer TransformFunc,
 ) Controller {
 	// This will hold incoming changes. Note how we pass clientState in as a
 	// KeyLister, that way resync operations will result in the correct set
@@ -393,24 +446,33 @@ func newInformer(
 		Process: func(obj interface{}) error {
 			// from oldest to newest
 			for _, d := range obj.(Deltas) {
-				switch d.Type {
-				case Sync, Replaced, Added, Updated:
-					if old, exists, err := clientState.Get(d.Object); err == nil && exists {
-						if err := clientState.Update(d.Object); err != nil {
-							return err
-						}
-						h.OnUpdate(old, d.Object)
-					} else {
-						if err := clientState.Add(d.Object); err != nil {
-							return err
-						}
-						h.OnAdd(d.Object)
-					}
-				case Deleted:
-					if err := clientState.Delete(d.Object); err != nil {
+				obj := d.Object
+				if transformer != nil {
+					var err error
+					obj, err = transformer(obj)
+					if err != nil {
 						return err
 					}
-					h.OnDelete(d.Object)
 				}
+
+				switch d.Type {
+				case Sync, Replaced, Added, Updated:
+					if old, exists, err := clientState.Get(obj); err == nil && exists {
+						if err := clientState.Update(obj); err != nil {
+							return err
+						}
+						h.OnUpdate(old, obj)
+					} else {
+						if err := clientState.Add(obj); err != nil {
+							return err
+						}
+						h.OnAdd(obj)
+					}
+				case Deleted:
+					if err := clientState.Delete(obj); err != nil {
+						return err
+					}
+					h.OnDelete(obj)
+				}
 			}
 			return nil
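[Editor's note, not part of the commit: the headline change above is the new transformer parameter threaded through newInformer. A minimal consumer-side sketch of the new NewTransformingInformer API follows; the clientset wiring and the event handler h are assumptions, and stripping managedFields is just one common use of a TransformFunc to shrink cache memory.]

    package main

    import (
    	"time"

    	v1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/fields"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/cache"
    )

    // newSlimPodInformer builds an informer whose cached Pods have had their
    // managedFields stripped by a TransformFunc before storage.
    func newSlimPodInformer(cs *kubernetes.Clientset, h cache.ResourceEventHandler) (cache.Store, cache.Controller) {
    	lw := cache.NewListWatchFromClient(cs.CoreV1().RESTClient(), "pods", "", fields.Everything())
    	transform := func(obj interface{}) (interface{}, error) {
    		// Per the contract above, tombstones and unexpected types pass through.
    		pod, ok := obj.(*v1.Pod)
    		if !ok {
    			return obj, nil
    		}
    		pod = pod.DeepCopy() // hedge: copy rather than mutate the object handed to us
    		pod.ManagedFields = nil
    		return pod, nil
    	}
    	return cache.NewTransformingInformer(lw, &v1.Pod{}, 30*time.Second, h, transform)
    }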
vendor/k8s.io/client-go/tools/cache/delta_fifo.go | 46 (generated, vendored)

@@ -20,10 +20,12 @@ import (
 	"errors"
 	"fmt"
 	"sync"
+	"time"
 
 	"k8s.io/apimachinery/pkg/util/sets"
 
 	"k8s.io/klog/v2"
+	utiltrace "k8s.io/utils/trace"
 )
 
 // DeltaFIFOOptions is the configuration parameters for DeltaFIFO. All are
@@ -121,7 +123,7 @@ type DeltaFIFO struct {
 	knownObjects KeyListerGetter
 
 	// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
-	// Currently, not used to gate any of CRED operations.
+	// Currently, not used to gate any of CRUD operations.
 	closed bool
 
 	// emitDeltaTypeReplaced is whether to emit the Replaced or Sync
@@ -153,7 +155,7 @@ const (
 // change happened, and the object's state after* that change.
 //
 // [*] Unless the change is a deletion, and then you'll get the final
-// state of the object before it was deleted.
+// state of the object before it was deleted.
 type Delta struct {
 	Type   DeltaType
 	Object interface{}
@@ -174,9 +176,10 @@ type Deltas []Delta
 // modifications.
 //
 // TODO: consider merging keyLister with this object, tracking a list of
-// "known" keys when Pop() is called. Have to think about how that
-// affects error retrying.
-// NOTE: It is possible to misuse this and cause a race when using an
+// "known" keys when Pop() is called. Have to think about how that
+// affects error retrying.
+//
+// NOTE: It is possible to misuse this and cause a race when using an
 // external known object source.
 // Whether there is a potential race depends on how the consumer
 // modifies knownObjects. In Pop(), process function is called under
@@ -185,8 +188,7 @@ type Deltas []Delta
 //
 // Example:
 // In case of sharedIndexInformer being a consumer
-// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/
-// src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
+// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
 // there is no race as knownObjects (s.indexer) is modified safely
 // under DeltaFIFO's lock. The only exceptions are GetStore() and
 // GetIndexer() methods, which expose ways to modify the underlying
@@ -340,7 +342,7 @@ func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error {
 	if !ok {
 		return fmt.Errorf("object must be of type deltas, but got: %#v", obj)
 	}
-	id, err := f.KeyOf(deltas.Newest().Object)
+	id, err := f.KeyOf(deltas)
 	if err != nil {
 		return KeyError{obj, err}
 	}
@@ -373,13 +375,8 @@ func dedupDeltas(deltas Deltas) Deltas {
 	a := &deltas[n-1]
 	b := &deltas[n-2]
 	if out := isDup(a, b); out != nil {
-		// `a` and `b` are duplicates. Only keep the one returned from isDup().
-		// TODO: This extra array allocation and copy seems unnecessary if
-		// all we do to dedup is compare the new delta with the last element
-		// in `items`, which could be done by mutating `items` directly.
-		// Might be worth profiling and investigating if it is safe to optimize.
-		d := append(Deltas{}, deltas[:n-2]...)
-		return append(d, *out)
+		deltas[n-2] = *out
+		return deltas[:n-1]
 	}
 	return deltas
 }
@@ -461,8 +458,8 @@ func (f *DeltaFIFO) listLocked() []interface{} {
 func (f *DeltaFIFO) ListKeys() []string {
 	f.lock.RLock()
 	defer f.lock.RUnlock()
-	list := make([]string, 0, len(f.items))
-	for key := range f.items {
+	list := make([]string, 0, len(f.queue))
+	for _, key := range f.queue {
 		list = append(list, key)
 	}
 	return list
@@ -531,6 +528,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 		}
 		id := f.queue[0]
 		f.queue = f.queue[1:]
+		depth := len(f.queue)
 		if f.initialPopulationCount > 0 {
 			f.initialPopulationCount--
 		}
@@ -541,6 +539,18 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 			continue
 		}
 		delete(f.items, id)
+		// Only log traces if the queue depth is greater than 10 and it takes more than
+		// 100 milliseconds to process one item from the queue.
+		// Queue depth never goes high because processing an item is locking the queue,
+		// and new items can't be added until processing finish.
+		// https://github.com/kubernetes/kubernetes/issues/103789
+		if depth > 10 {
+			trace := utiltrace.New("DeltaFIFO Pop Process",
+				utiltrace.Field{Key: "ID", Value: id},
+				utiltrace.Field{Key: "Depth", Value: depth},
+				utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"})
+			defer trace.LogIfLong(100 * time.Millisecond)
+		}
 		err := process(item)
 		if e, ok := err.(ErrRequeue); ok {
 			f.addIfNotPresent(id, item)
@@ -562,7 +572,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 // of the Deltas associated with K. Otherwise the pre-existing keys
 // are those listed by `f.knownObjects` and the current object of K is
 // what `f.knownObjects.GetByKey(K)` returns.
-func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
+func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
 	f.lock.Lock()
 	defer f.lock.Unlock()
 	keys := make(sets.String, len(list))
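[Editor's note, not part of the commit: two hunks above are behavioral. AddIfNotPresent now derives the key from the whole Deltas value; KeyOf itself unwraps a Deltas to its newest object, so this is equivalent in the normal case while turning an empty slice into an error instead of a panic. And dedupDeltas now mutates the slice in place rather than allocating a copy. A small orientation sketch of the Pop/Deltas flow these functions feed; the pod value is assumed to be a typed object such as *v1.Pod.]

    fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
    	KeyFunction: cache.MetaNamespaceKeyFunc,
    })
    _ = fifo.Add(pod) // queues an Added delta under the pod's namespace/name key
    _, _ = fifo.Pop(func(obj interface{}) error {
    	// Pop hands over every Delta accumulated for one key, oldest first.
    	for _, d := range obj.(cache.Deltas) {
    		fmt.Printf("%v %T\n", d.Type, d.Object)
    	}
    	return nil
    })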
vendor/k8s.io/client-go/tools/cache/expiration_cache.go | 2 (generated, vendored)

@@ -20,8 +20,8 @@ import (
 	"sync"
 	"time"
 
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/klog/v2"
+	"k8s.io/utils/clock"
 )
 
 // ExpirationCache implements the store interface
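[Editor's note, not part of the commit: this import swap recurs through the rest of the commit. The deprecated k8s.io/apimachinery/pkg/util/clock package is replaced by k8s.io/utils/clock, which exposes the same interface shape, so downstream code migrates mechanically.]

    import (
    	"fmt"

    	"k8s.io/utils/clock" // was: "k8s.io/apimachinery/pkg/util/clock"
    )

    func printNow() {
    	var c clock.Clock = clock.RealClock{} // the real-time implementation
    	fmt.Println(c.Now())
    }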
vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go | 2 (generated, vendored)

@@ -17,8 +17,8 @@ limitations under the License.
 package cache
 
 import (
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/utils/clock"
 )
 
 type fakeThreadSafeMap struct {
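[Editor's note, not part of the commit: the fake side of the clock move pairs with k8s.io/utils/clock/testing. A hedged sketch of driving an expiration store from a fake clock; the pod value and its "default/example" key are assumptions, and NewFakeExpirationStore is the constructor defined in the file being diffed here.]

    import (
    	"time"

    	"k8s.io/client-go/tools/cache"
    	testingclock "k8s.io/utils/clock/testing"
    )

    func expirySketch() {
    	fakeClock := testingclock.NewFakeClock(time.Now())
    	policy := &cache.TTLPolicy{TTL: time.Minute, Clock: fakeClock}
    	store := cache.NewFakeExpirationStore(cache.MetaNamespaceKeyFunc, nil, policy, fakeClock)
    	_ = store.Add(pod)              // pod: assumed *v1.Pod keyed as "default/example"
    	fakeClock.Step(2 * time.Minute) // advance time without sleeping
    	_, exists, _ := store.GetByKey("default/example")
    	_ = exists // false: the entry has expired
    }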
vendor/k8s.io/client-go/tools/cache/fifo.go | 7 (generated, vendored)

@@ -127,7 +127,7 @@ type FIFO struct {
 
 	// Indication the queue is closed.
 	// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
-	// Currently, not used to gate any of CRED operations.
+	// Currently, not used to gate any of CRUD operations.
 	closed bool
 }
 
@@ -263,10 +263,7 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
 func (f *FIFO) IsClosed() bool {
 	f.lock.Lock()
 	defer f.lock.Unlock()
-	if f.closed {
-		return true
-	}
-	return false
+	return f.closed
 }
 
 // Pop waits until an item is ready and processes it. If multiple items are
vendor/k8s.io/client-go/tools/cache/heap.go | 5 (generated, vendored)

@@ -304,10 +304,7 @@ func (h *Heap) GetByKey(key string) (interface{}, bool, error) {
 func (h *Heap) IsClosed() bool {
 	h.lock.RLock()
 	defer h.lock.RUnlock()
-	if h.closed {
-		return true
-	}
-	return false
+	return h.closed
 }
 
 // NewHeap returns a Heap which can be used to queue up items to process.
vendor/k8s.io/client-go/tools/cache/index.go | 4 (generated, vendored)

@@ -78,7 +78,7 @@ func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc {
 }
 
 const (
-	// NamespaceIndex is the lookup name for the most comment index function, which is to index by the namespace field.
+	// NamespaceIndex is the lookup name for the most common index function, which is to index by the namespace field.
 	NamespaceIndex string = "namespace"
 )
 
@@ -94,7 +94,7 @@ func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) {
 // Index maps the indexed value to a set of keys in the store that match on that value
 type Index map[string]sets.String
 
-// Indexers maps a name to a IndexFunc
+// Indexers maps a name to an IndexFunc
 type Indexers map[string]IndexFunc
 
 // Indices maps a name to an Index
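[Editor's note, not part of the commit: since the corrected comments concern the Indexers type, a quick reminder of how the pieces fit together. The "byNode" index name and the pod value are illustrative.]

    indexers := cache.Indexers{ // Indexers maps a name to an IndexFunc
    	"byNode": func(obj interface{}) ([]string, error) {
    		pod, ok := obj.(*v1.Pod)
    		if !ok {
    			return nil, nil
    		}
    		return []string{pod.Spec.NodeName}, nil
    	},
    }
    indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, indexers)
    _ = indexer.Add(pod)
    onNode1, _ := indexer.ByIndex("byNode", "node-1") // all cached pods indexed under "node-1"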
vendor/k8s.io/client-go/tools/cache/mutation_detector.go | 2 (generated, vendored)

@@ -99,7 +99,7 @@ func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) {
 	for {
 		if d.lastRotated.IsZero() {
 			d.lastRotated = time.Now()
-		} else if time.Now().Sub(d.lastRotated) > d.retainDuration {
+		} else if time.Since(d.lastRotated) > d.retainDuration {
 			d.retainedCachedObjs = d.cachedObjs
 			d.cachedObjs = nil
 			d.lastRotated = time.Now()
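[Editor's note, not part of the commit: for context, the detector this touches is opt-in; it only does real work when the process runs with the environment variable KUBE_CACHE_MUTATION_DETECTOR=true. A sketch of direct use; the pod value is assumed.]

    detector := cache.NewCacheMutationDetector("example-informer")
    detector.AddObject(pod) // stores a deep copy alongside the live reference
    stop := make(chan struct{})
    go detector.Run(stop) // periodically recompares copies; panics if pod was mutated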
vendor/k8s.io/client-go/tools/cache/reflector.go | 18 (generated, vendored)

@@ -32,7 +32,6 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/naming"
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -40,6 +39,7 @@ import (
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/tools/pager"
 	"k8s.io/klog/v2"
+	"k8s.io/utils/clock"
 	"k8s.io/utils/trace"
 )
 
@@ -69,7 +69,7 @@ type Reflector struct {
 
 	// backoff manages backoff of ListWatch
 	backoffManager wait.BackoffManager
-	// initConnBackoffManager manages backoff the initial connection with the Watch calll of ListAndWatch.
+	// initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch.
 	initConnBackoffManager wait.BackoffManager
 
 	resyncPeriod time.Duration
@@ -216,13 +216,13 @@ var internalPackages = []string{"client-go/tools/cache/"}
 // objects and subsequent deltas.
 // Run will exit when stopCh is closed.
 func (r *Reflector) Run(stopCh <-chan struct{}) {
-	klog.V(2).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
+	klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
 	wait.BackoffUntil(func() {
 		if err := r.ListAndWatch(stopCh); err != nil {
 			r.watchErrorHandler(r, err)
 		}
 	}, r.backoffManager, true, stopCh)
-	klog.V(2).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
+	klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
 }
 
 var (
@@ -319,7 +319,9 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 			panic(r)
 		case <-listCh:
 		}
+		initTrace.Step("Objects listed", trace.Field{"error", err})
 		if err != nil {
+			klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err)
 			return fmt.Errorf("failed to list %v: %v", r.expectedTypeName, err)
 		}
 
@@ -338,7 +340,6 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 		}
 
 		r.setIsLastSyncResourceVersionUnavailable(false) // list was successful
-		initTrace.Step("Objects listed")
 		listMetaInterface, err := meta.ListAccessor(list)
 		if err != nil {
 			return fmt.Errorf("unable to understand list result %#v: %v", list, err)
@@ -417,7 +418,8 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 			// It doesn't make sense to re-list all objects because most likely we will be able to restart
 			// watch where we ended.
 			// If that's the case begin exponentially backing off and resend watch request.
-			if utilnet.IsConnectionRefused(err) {
+			// Do the same for "429" errors.
+			if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) {
 				<-r.initConnBackoffManager.Backoff().C()
 				continue
 			}
@@ -432,6 +434,10 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 				// has a semantic that it returns data at least as fresh as provided RV.
 				// So first try to LIST with setting RV to resource version of last observed object.
 				klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
+			case apierrors.IsTooManyRequests(err):
+				klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName)
+				<-r.initConnBackoffManager.Backoff().C()
+				continue
 			default:
 				klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
 			}
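[Editor's note, not part of the commit: the new 429 handling lives inside ListAndWatch, so every Reflector, and every informer built on one, picks it up with no API change. A bare Reflector for reference; lw is an assumed ListerWatcher.]

    store := cache.NewStore(cache.MetaNamespaceKeyFunc)
    r := cache.NewReflector(lw, &v1.Pod{}, store, 0)
    stop := make(chan struct{})
    go r.Run(stop) // list/watch failures, now including HTTP 429, back off before retrying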
vendor/k8s.io/client-go/tools/cache/shared_informer.go | 16 (generated, vendored)

@@ -23,10 +23,10 @@ import (
 
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/clock"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/utils/buffer"
+	"k8s.io/utils/clock"
 
 	"k8s.io/klog/v2"
 )
@@ -368,6 +368,10 @@ func (s *sharedIndexInformer) SetWatchErrorHandler(handler WatchErrorHandler) error {
 func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 
+	if s.HasStarted() {
+		klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed")
+		return
+	}
 	fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
 		KnownObjects:          s.indexer,
 		EmitDeltaTypeReplaced: true,
@@ -410,6 +414,12 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
 	s.controller.Run(stopCh)
 }
 
+func (s *sharedIndexInformer) HasStarted() bool {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+	return s.started
+}
+
 func (s *sharedIndexInformer) HasSynced() bool {
 	s.startedLock.Lock()
 	defer s.startedLock.Unlock()
@@ -694,9 +704,9 @@ type processorListener struct {
 	// full resync from the shared informer, but modified by two
 	// adjustments. One is imposing a lower bound,
 	// `minimumResyncPeriod`. The other is another lower bound, the
-	// sharedProcessor's `resyncCheckPeriod`, that is imposed (a) only
+	// sharedIndexInformer's `resyncCheckPeriod`, that is imposed (a) only
 	// in AddEventHandlerWithResyncPeriod invocations made after the
-	// sharedProcessor starts and (b) only if the informer does
+	// sharedIndexInformer starts and (b) only if the informer does
 	// resyncs at all.
 	requestedResyncPeriod time.Duration
 	// resyncPeriod is the threshold that will be used in the logic
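[Editor's note, not part of the commit: the new HasStarted guard makes a duplicate Run call warn and return instead of racing the first one. A sketch; lw is an assumed ListerWatcher.]

    informer := cache.NewSharedIndexInformer(lw, &v1.Pod{}, 0, cache.Indexers{})
    stop := make(chan struct{})
    go informer.Run(stop)
    go informer.Run(stop) // now logs "run more than once is not allowed" and returns
    cache.WaitForCacheSync(stop, informer.HasSynced)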
vendor/k8s.io/client-go/tools/cache/store.go | 5 (generated, vendored)

@@ -85,6 +85,11 @@ func (k KeyError) Error() string {
 	return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err)
 }
 
+// Unwrap implements errors.Unwrap
+func (k KeyError) Unwrap() error {
+	return k.Err
+}
+
 // ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for
 // the object but not the object itself.
 type ExplicitKey string
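[Editor's note, not part of the commit: with KeyError now implementing Unwrap, the standard errors helpers can reach the underlying cause. The obj value is an assumed placeholder.]

    cause := errors.New("object has no meta")
    err := cache.KeyError{Obj: obj, Err: cause} // obj: the object that failed key generation
    fmt.Println(errors.Is(err, cause))          // true, via the new Unwrap
    var keyErr cache.KeyError
    fmt.Println(errors.As(err, &keyErr))        // true; keyErr.Obj identifies the culprit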
vendor/k8s.io/client-go/tools/cache/thread_safe_store.go | 90 (generated, vendored)

@@ -90,7 +90,7 @@ func (c *threadSafeMap) Delete(key string) {
 	c.lock.Lock()
 	defer c.lock.Unlock()
 	if obj, exists := c.items[key]; exists {
-		c.deleteFromIndices(obj, key)
+		c.updateIndices(obj, nil, key)
 		delete(c.items, key)
 	}
 }
@@ -251,61 +251,73 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
 	return nil
 }
 
-// updateIndices modifies the objects location in the managed indexes, if this is an update, you must provide an oldObj
+// updateIndices modifies the objects location in the managed indexes:
+// - for create you must provide only the newObj
+// - for update you must provide both the oldObj and the newObj
+// - for delete you must provide only the oldObj
 // updateIndices must be called from a function that already has a lock on the cache
 func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) {
-	// if we got an old object, we need to remove it before we add it again
-	if oldObj != nil {
-		c.deleteFromIndices(oldObj, key)
-	}
+	var oldIndexValues, indexValues []string
+	var err error
 	for name, indexFunc := range c.indexers {
-		indexValues, err := indexFunc(newObj)
+		if oldObj != nil {
+			oldIndexValues, err = indexFunc(oldObj)
+		} else {
+			oldIndexValues = oldIndexValues[:0]
+		}
		if err != nil {
 			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
 		}
 
+		if newObj != nil {
+			indexValues, err = indexFunc(newObj)
+		} else {
+			indexValues = indexValues[:0]
+		}
+		if err != nil {
+			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
+		}
+
 		index := c.indices[name]
 		if index == nil {
 			index = Index{}
 			c.indices[name] = index
 		}
 
-		for _, indexValue := range indexValues {
-			set := index[indexValue]
-			if set == nil {
-				set = sets.String{}
-				index[indexValue] = set
-			}
-			set.Insert(key)
+		if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] {
+			// We optimize for the most common case where indexFunc returns a single value which has not been changed
+			continue
+		}
+
+		for _, value := range oldIndexValues {
+			c.deleteKeyFromIndex(key, value, index)
+		}
+		for _, value := range indexValues {
+			c.addKeyToIndex(key, value, index)
 		}
 	}
 }
 
-// deleteFromIndices removes the object from each of the managed indexes
-// it is intended to be called from a function that already has a lock on the cache
-func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) {
-	for name, indexFunc := range c.indexers {
-		indexValues, err := indexFunc(obj)
-		if err != nil {
-			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
-		}
+func (c *threadSafeMap) addKeyToIndex(key, indexValue string, index Index) {
+	set := index[indexValue]
+	if set == nil {
+		set = sets.String{}
+		index[indexValue] = set
+	}
+	set.Insert(key)
+}
 
-		index := c.indices[name]
-		if index == nil {
-			continue
-		}
-		for _, indexValue := range indexValues {
-			set := index[indexValue]
-			if set != nil {
-				set.Delete(key)
-
-				// If we don't delete the set when zero, indices with high cardinality
-				// short lived resources can cause memory to increase over time from
-				// unused empty sets. See `kubernetes/kubernetes/issues/84959`.
-				if len(set) == 0 {
-					delete(index, indexValue)
-				}
-			}
-		}
-	}
+func (c *threadSafeMap) deleteKeyFromIndex(key, indexValue string, index Index) {
+	set := index[indexValue]
+	if set == nil {
+		return
+	}
+	set.Delete(key)
+	// If we don't delete the set when zero, indices with high cardinality
+	// short lived resources can cause memory to increase over time from
+	// unused empty sets. See `kubernetes/kubernetes/issues/84959`.
+	if len(set) == 0 {
+		delete(index, indexValue)
+	}
 }
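[Editor's note, not part of the commit: design note on the rewrite above. Delete now funnels through updateIndices(obj, nil, key), so create, update, and delete share one index-maintenance path, and an update whose single index value is unchanged skips the delete-and-reinsert churn entirely. Observable behavior through the public Indexer API stays the same; the pod value below is assumed.]

    indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
    	cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
    })
    _ = indexer.Add(pod)    // create: only the new object contributes index values
    _ = indexer.Update(pod) // update: unchanged namespace hits the fast path above
    _ = indexer.Delete(pod) // delete: handled as updateIndices(pod, nil, key)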