go.mod: bump client-go and apimachinery
I had to run `make generate`. Some API functions gained additional `Options` and `Context` parameters; for now I pass empty options and `context.TODO()`.

Signed-off-by: leonnicolas <leonloechner@gmx.de>
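For illustration, a minimal sketch of what the new call sites look like after the bump (the clientset and the Nodes resource here are only an example, not code from this repository):

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listNodes shows the post-bump signature: List now takes a context in
// addition to the (still explicit) options struct.
func listNodes(client kubernetes.Interface) error {
	// Before the bump this was: client.CoreV1().Nodes().List(metav1.ListOptions{})
	_, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	return err
}
```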
11
vendor/k8s.io/client-go/tools/cache/OWNERS
generated
vendored
@@ -20,32 +20,21 @@ reviewers:
- caesarxuchao
- mikedanese
- liggitt
- nikhiljindal
- erictune
- davidopp
- pmorie
- janetkuo
- justinsb
- eparis
- soltysh
- jsafrane
- dims
- madhusudancs
- hongchaodeng
- krousey
- markturansky
- fgrzadkowski
- xiang90
- mml
- ingvagabund
- resouer
- jessfraz
- david-mcmahon
- mfojtik
- '249043822'
- lixiaobing10051267
- ddysher
- mqliang
- feihujiang
- sdminonne
- ncdc
88
vendor/k8s.io/client-go/tools/cache/controller.go
generated
vendored
@@ -26,7 +26,16 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
)
|
||||
|
||||
// Config contains all the settings for a Controller.
|
||||
// This file implements a low-level controller that is used in
|
||||
// sharedIndexInformer, which is an implementation of
|
||||
// SharedIndexInformer. Such informers, in turn, are key components
|
||||
// in the high level controllers that form the backbone of the
|
||||
// Kubernetes control plane. Look at those for examples, or the
|
||||
// example in
|
||||
// https://github.com/kubernetes/client-go/tree/master/examples/workqueue
|
||||
// .
|
||||
|
||||
// Config contains all the settings for one of these low-level controllers.
|
||||
type Config struct {
|
||||
// The queue for your objects - has to be a DeltaFIFO due to
|
||||
// assumptions in the implementation. Your Process() function
|
||||
@@ -36,31 +45,36 @@ type Config struct {
|
||||
// Something that can list and watch your objects.
|
||||
ListerWatcher
|
||||
|
||||
// Something that can process your objects.
|
||||
// Something that can process a popped Deltas.
|
||||
Process ProcessFunc
|
||||
|
||||
// The type of your objects.
|
||||
// ObjectType is an example object of the type this controller is
|
||||
// expected to handle. Only the type needs to be right, except
|
||||
// that when that is `unstructured.Unstructured` the object's
|
||||
// `"apiVersion"` and `"kind"` must also be right.
|
||||
ObjectType runtime.Object
|
||||
|
||||
// Reprocess everything at least this often.
|
||||
// Note that if it takes longer for you to clear the queue than this
|
||||
// period, you will end up processing items in the order determined
|
||||
// by FIFO.Replace(). Currently, this is random. If this is a
|
||||
// problem, we can change that replacement policy to append new
|
||||
// things to the end of the queue instead of replacing the entire
|
||||
// queue.
|
||||
// FullResyncPeriod is the period at which ShouldResync is considered.
|
||||
FullResyncPeriod time.Duration
|
||||
|
||||
// ShouldResync, if specified, is invoked when the controller's reflector determines the next
|
||||
// periodic sync should occur. If this returns true, it means the reflector should proceed with
|
||||
// the resync.
|
||||
// ShouldResync is periodically used by the reflector to determine
|
||||
// whether to Resync the Queue. If ShouldResync is `nil` or
|
||||
// returns true, it means the reflector should proceed with the
|
||||
// resync.
|
||||
ShouldResync ShouldResyncFunc
|
||||
|
||||
// If true, when Process() returns an error, re-enqueue the object.
|
||||
// TODO: add interface to let you inject a delay/backoff or drop
|
||||
// the object completely if desired. Pass the object in
|
||||
// question to this interface as a parameter.
|
||||
// question to this interface as a parameter. This is probably moot
|
||||
// now that this functionality appears at a higher level.
|
||||
RetryOnError bool
|
||||
|
||||
// Called whenever the ListAndWatch drops the connection with an error.
|
||||
WatchErrorHandler WatchErrorHandler
|
||||
|
||||
// WatchListPageSize is the requested chunk size of initial and relist watch lists.
|
||||
WatchListPageSize int64
|
||||
}
|
||||
|
||||
// ShouldResyncFunc is a type of function that indicates if a reflector should perform a
|
||||
@@ -71,7 +85,7 @@ type ShouldResyncFunc func() bool
|
||||
// ProcessFunc processes a single object.
|
||||
type ProcessFunc func(obj interface{}) error
|
||||
|
||||
// Controller is a generic controller framework.
|
||||
// `*controller` implements Controller
|
||||
type controller struct {
|
||||
config Config
|
||||
reflector *Reflector
|
||||
@@ -79,9 +93,22 @@ type controller struct {
|
||||
clock clock.Clock
|
||||
}
|
||||
|
||||
// Controller is a low-level controller that is parameterized by a
|
||||
// Config and used in sharedIndexInformer.
|
||||
type Controller interface {
|
||||
// Run does two things. One is to construct and run a Reflector
|
||||
// to pump objects/notifications from the Config's ListerWatcher
|
||||
// to the Config's Queue and possibly invoke the occasional Resync
|
||||
// on that Queue. The other is to repeatedly Pop from the Queue
|
||||
// and process with the Config's ProcessFunc. Both of these
|
||||
// continue until `stopCh` is closed.
|
||||
Run(stopCh <-chan struct{})
|
||||
|
||||
// HasSynced delegates to the Config's Queue
|
||||
HasSynced() bool
|
||||
|
||||
// LastSyncResourceVersion delegates to the Reflector when there
|
||||
// is one, otherwise returns the empty string
|
||||
LastSyncResourceVersion() string
|
||||
}
|
||||
|
||||
@@ -94,7 +121,7 @@ func New(c *Config) Controller {
|
||||
return ctlr
|
||||
}
|
||||
|
||||
// Run begins processing items, and will continue until a value is sent down stopCh.
|
||||
// Run begins processing items, and will continue until a value is sent down stopCh or it is closed.
|
||||
// It's an error to call Run more than once.
|
||||
// Run blocks; call via go.
|
||||
func (c *controller) Run(stopCh <-chan struct{}) {
|
||||
@@ -110,18 +137,22 @@ func (c *controller) Run(stopCh <-chan struct{}) {
|
||||
c.config.FullResyncPeriod,
|
||||
)
|
||||
r.ShouldResync = c.config.ShouldResync
|
||||
r.WatchListPageSize = c.config.WatchListPageSize
|
||||
r.clock = c.clock
|
||||
if c.config.WatchErrorHandler != nil {
|
||||
r.watchErrorHandler = c.config.WatchErrorHandler
|
||||
}
|
||||
|
||||
c.reflectorMutex.Lock()
|
||||
c.reflector = r
|
||||
c.reflectorMutex.Unlock()
|
||||
|
||||
var wg wait.Group
|
||||
defer wg.Wait()
|
||||
|
||||
wg.StartWithChannel(stopCh, r.Run)
|
||||
|
||||
wait.Until(c.processLoop, time.Second, stopCh)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Returns true once this controller has completed an initial resource listing
|
||||
@@ -130,6 +161,8 @@ func (c *controller) HasSynced() bool {
|
||||
}
|
||||
|
||||
func (c *controller) LastSyncResourceVersion() string {
|
||||
c.reflectorMutex.RLock()
|
||||
defer c.reflectorMutex.RUnlock()
|
||||
if c.reflector == nil {
|
||||
return ""
|
||||
}
|
||||
@@ -149,7 +182,7 @@ func (c *controller) processLoop() {
|
||||
for {
|
||||
obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
|
||||
if err != nil {
|
||||
if err == FIFOClosedError {
|
||||
if err == ErrFIFOClosed {
|
||||
return
|
||||
}
|
||||
if c.config.RetryOnError {
|
||||
@@ -160,9 +193,11 @@ func (c *controller) processLoop() {
|
||||
}
|
||||
}
|
||||
|
||||
// ResourceEventHandler can handle notifications for events that happen to a
|
||||
// resource. The events are informational only, so you can't return an
|
||||
// error.
|
||||
// ResourceEventHandler can handle notifications for events that
|
||||
// happen to a resource. The events are informational only, so you
|
||||
// can't return an error. The handlers MUST NOT modify the objects
|
||||
// received; this concerns not only the top level of structure but all
|
||||
// the data structures reachable from it.
|
||||
// * OnAdd is called when an object is added.
|
||||
// * OnUpdate is called when an object is modified. Note that oldObj is the
|
||||
// last known state of the object-- it is possible that several changes
|
||||
@@ -182,7 +217,8 @@ type ResourceEventHandler interface {
|
||||
|
||||
// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or
|
||||
// as few of the notification functions as you want while still implementing
|
||||
// ResourceEventHandler.
|
||||
// ResourceEventHandler. This adapter does not remove the prohibition against
|
||||
// modifying the objects.
|
||||
type ResourceEventHandlerFuncs struct {
|
||||
AddFunc func(obj interface{})
|
||||
UpdateFunc func(oldObj, newObj interface{})
|
||||
@@ -214,6 +250,7 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
|
||||
// in, ensuring the appropriate nested handler method is invoked. An object
|
||||
// that starts passing the filter after an update is considered an add, and an
|
||||
// object that stops passing the filter after an update is considered a delete.
|
||||
// Like the handlers, the filter MUST NOT modify the objects it is given.
|
||||
type FilteringResourceEventHandler struct {
|
||||
FilterFunc func(obj interface{}) bool
|
||||
Handler ResourceEventHandler
|
||||
@@ -341,7 +378,10 @@ func newInformer(
|
||||
// This will hold incoming changes. Note how we pass clientState in as a
|
||||
// KeyLister, that way resync operations will result in the correct set
|
||||
// of update/delete deltas.
|
||||
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState)
|
||||
fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
|
||||
KnownObjects: clientState,
|
||||
EmitDeltaTypeReplaced: true,
|
||||
})
|
||||
|
||||
cfg := &Config{
|
||||
Queue: fifo,
|
||||
@@ -354,7 +394,7 @@ func newInformer(
|
||||
// from oldest to newest
|
||||
for _, d := range obj.(Deltas) {
|
||||
switch d.Type {
|
||||
case Sync, Added, Updated:
|
||||
case Sync, Replaced, Added, Updated:
|
||||
if old, exists, err := clientState.Get(d.Object); err == nil && exists {
|
||||
if err := clientState.Update(d.Object); err != nil {
|
||||
return err
|
||||
|
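As a rough usage sketch of the low-level controller documented in this file (the ListerWatcher, the Pod type, and the empty Process body are assumptions, not code from this commit):

```go
package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// runLowLevelController wires a DeltaFIFO, a ListerWatcher and a Process
// function into the low-level Controller described above.
func runLowLevelController(lw cache.ListerWatcher, stopCh <-chan struct{}) {
	clientState := cache.NewStore(cache.MetaNamespaceKeyFunc)

	fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
		KnownObjects:          clientState,
		EmitDeltaTypeReplaced: true,
	})

	cfg := &cache.Config{
		Queue:            fifo,
		ListerWatcher:    lw,
		ObjectType:       &v1.Pod{},
		FullResyncPeriod: 30 * time.Second,
		Process: func(obj interface{}) error {
			// obj is a cache.Deltas; apply each delta to clientState here.
			return nil
		},
	}

	cache.New(cfg).Run(stopCh) // blocks until stopCh is closed
}
```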
396
vendor/k8s.io/client-go/tools/cache/delta_fifo.go
generated
vendored
@@ -23,24 +23,162 @@ import (
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"k8s.io/klog"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// NewDeltaFIFO returns a Store which can be used process changes to items.
|
||||
// DeltaFIFOOptions is the configuration parameters for DeltaFIFO. All are
|
||||
// optional.
|
||||
type DeltaFIFOOptions struct {
|
||||
|
||||
// KeyFunction is used to figure out what key an object should have. (It's
|
||||
// exposed in the returned DeltaFIFO's KeyOf() method, with additional
|
||||
// handling around deleted objects and queue state).
|
||||
// Optional, the default is MetaNamespaceKeyFunc.
|
||||
KeyFunction KeyFunc
|
||||
|
||||
// KnownObjects is expected to return a list of keys that the consumer of
|
||||
// this queue "knows about". It is used to decide which items are missing
|
||||
// when Replace() is called; 'Deleted' deltas are produced for the missing items.
|
||||
// KnownObjects may be nil if you can tolerate missing deletions on Replace().
|
||||
KnownObjects KeyListerGetter
|
||||
|
||||
// EmitDeltaTypeReplaced indicates that the queue consumer
|
||||
// understands the Replaced DeltaType. Before the `Replaced` event type was
|
||||
// added, calls to Replace() were handled the same as Sync(). For
|
||||
// backwards-compatibility purposes, this is false by default.
|
||||
// When true, `Replaced` events will be sent for items passed to a Replace() call.
|
||||
// When false, `Sync` events will be sent instead.
|
||||
EmitDeltaTypeReplaced bool
|
||||
}
|
||||
|
||||
// DeltaFIFO is like FIFO, but differs in two ways. One is that the
|
||||
// accumulator associated with a given object's key is not that object
|
||||
// but rather a Deltas, which is a slice of Delta values for that
|
||||
// object. Applying an object to a Deltas means to append a Delta
|
||||
// except when the potentially appended Delta is a Deleted and the
|
||||
// Deltas already ends with a Deleted. In that case the Deltas does
|
||||
// not grow, although the terminal Deleted will be replaced by the new
|
||||
// Deleted if the older Deleted's object is a
|
||||
// DeletedFinalStateUnknown.
|
||||
//
|
||||
// keyFunc is used to figure out what key an object should have. (It's
|
||||
// exposed in the returned DeltaFIFO's KeyOf() method, with bonus features.)
|
||||
// The other difference is that DeltaFIFO has two additional ways that
|
||||
// an object can be applied to an accumulator: Replaced and Sync.
|
||||
// If EmitDeltaTypeReplaced is not set to true, Sync will be used in
|
||||
// replace events for backwards compatibility. Sync is used for periodic
|
||||
// resync events.
|
||||
//
|
||||
// DeltaFIFO is a producer-consumer queue, where a Reflector is
|
||||
// intended to be the producer, and the consumer is whatever calls
|
||||
// the Pop() method.
|
||||
//
|
||||
// DeltaFIFO solves this use case:
|
||||
// * You want to process every object change (delta) at most once.
|
||||
// * When you process an object, you want to see everything
|
||||
// that's happened to it since you last processed it.
|
||||
// * You want to process the deletion of some of the objects.
|
||||
// * You might want to periodically reprocess objects.
|
||||
//
|
||||
// DeltaFIFO's Pop(), Get(), and GetByKey() methods return
|
||||
// interface{} to satisfy the Store/Queue interfaces, but they
|
||||
// will always return an object of type Deltas. List() returns
|
||||
// the newest object from each accumulator in the FIFO.
|
||||
//
|
||||
// A DeltaFIFO's knownObjects KeyListerGetter provides the abilities
|
||||
// to list Store keys and to get objects by Store key. The objects in
|
||||
// question are called "known objects" and this set of objects
|
||||
// modifies the behavior of the Delete, Replace, and Resync methods
|
||||
// (each in a different way).
|
||||
//
|
||||
// A note on threading: If you call Pop() in parallel from multiple
|
||||
// threads, you could end up with multiple threads processing slightly
|
||||
// different versions of the same object.
|
||||
type DeltaFIFO struct {
|
||||
// lock/cond protects access to 'items' and 'queue'.
|
||||
lock sync.RWMutex
|
||||
cond sync.Cond
|
||||
|
||||
// `items` maps a key to a Deltas.
|
||||
// Each such Deltas has at least one Delta.
|
||||
items map[string]Deltas
|
||||
|
||||
// `queue` maintains FIFO order of keys for consumption in Pop().
|
||||
// There are no duplicates in `queue`.
|
||||
// A key is in `queue` if and only if it is in `items`.
|
||||
queue []string
|
||||
|
||||
// populated is true if the first batch of items inserted by Replace() has been populated
|
||||
// or Delete/Add/Update/AddIfNotPresent was called first.
|
||||
populated bool
|
||||
// initialPopulationCount is the number of items inserted by the first call of Replace()
|
||||
initialPopulationCount int
|
||||
|
||||
// keyFunc is used to make the key used for queued item
|
||||
// insertion and retrieval, and should be deterministic.
|
||||
keyFunc KeyFunc
|
||||
|
||||
// knownObjects list keys that are "known" --- affecting Delete(),
|
||||
// Replace(), and Resync()
|
||||
knownObjects KeyListerGetter
|
||||
|
||||
// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
|
||||
// Currently, not used to gate any of CRED operations.
|
||||
closed bool
|
||||
|
||||
// emitDeltaTypeReplaced is whether to emit the Replaced or Sync
|
||||
// DeltaType when Replace() is called (to preserve backwards compat).
|
||||
emitDeltaTypeReplaced bool
|
||||
}
|
||||
|
||||
// DeltaType is the type of a change (addition, deletion, etc)
|
||||
type DeltaType string
|
||||
|
||||
// Change type definition
|
||||
const (
|
||||
Added DeltaType = "Added"
|
||||
Updated DeltaType = "Updated"
|
||||
Deleted DeltaType = "Deleted"
|
||||
// Replaced is emitted when we encountered watch errors and had to do a
|
||||
// relist. We don't know if the replaced object has changed.
|
||||
//
|
||||
// NOTE: Previous versions of DeltaFIFO would use Sync for Replace events
|
||||
// as well. Hence, Replaced is only emitted when the option
|
||||
// EmitDeltaTypeReplaced is true.
|
||||
Replaced DeltaType = "Replaced"
|
||||
// Sync is for synthetic events during a periodic resync.
|
||||
Sync DeltaType = "Sync"
|
||||
)
|
||||
|
||||
// Delta is a member of Deltas (a list of Delta objects) which
|
||||
// in its turn is the type stored by a DeltaFIFO. It tells you what
|
||||
// change happened, and the object's state after* that change.
|
||||
//
|
||||
// [*] Unless the change is a deletion, and then you'll get the final
|
||||
// state of the object before it was deleted.
|
||||
type Delta struct {
|
||||
Type DeltaType
|
||||
Object interface{}
|
||||
}
|
||||
|
||||
// Deltas is a list of one or more 'Delta's to an individual object.
|
||||
// The oldest delta is at index 0, the newest delta is the last one.
|
||||
type Deltas []Delta
|
||||
|
||||
// NewDeltaFIFO returns a Queue which can be used to process changes to items.
|
||||
//
|
||||
// keyFunc is used to figure out what key an object should have. (It is
|
||||
// exposed in the returned DeltaFIFO's KeyOf() method, with additional handling
|
||||
// around deleted objects and queue state).
|
||||
//
|
||||
// 'knownObjects' may be supplied to modify the behavior of Delete,
|
||||
// Replace, and Resync. It may be nil if you do not need those
|
||||
// modifications.
|
||||
//
|
||||
// 'keyLister' is expected to return a list of keys that the consumer of
|
||||
// this queue "knows about". It is used to decide which items are missing
|
||||
// when Replace() is called; 'Deleted' deltas are produced for these items.
|
||||
// It may be nil if you don't need to detect all deletions.
|
||||
// TODO: consider merging keyLister with this object, tracking a list of
|
||||
// "known" keys when Pop() is called. Have to think about how that
|
||||
// affects error retrying.
|
||||
// NOTE: It is possible to misuse this and cause a race when using an
|
||||
// external known object source.
|
||||
// Whether there is a potential race depends on how the comsumer
|
||||
// Whether there is a potential race depends on how the consumer
|
||||
// modifies knownObjects. In Pop(), process function is called under
|
||||
// lock, so it is safe to update data structures in it that need to be
|
||||
// in sync with the queue (e.g. knownObjects).
|
||||
@@ -56,76 +194,42 @@ import (
|
||||
// and internal tests.
|
||||
//
|
||||
// Also see the comment on DeltaFIFO.
|
||||
//
|
||||
// Warning: This constructs a DeltaFIFO that does not differentiate between
|
||||
// events caused by a call to Replace (e.g., from a relist, which may
|
||||
// contain object updates), and synthetic events caused by a periodic resync
|
||||
// (which just emit the existing object). See https://issue.k8s.io/86015 for details.
|
||||
//
|
||||
// Use `NewDeltaFIFOWithOptions(DeltaFIFOOptions{..., EmitDeltaTypeReplaced: true})`
|
||||
// instead to receive a `Replaced` event depending on the type.
|
||||
//
|
||||
// Deprecated: Equivalent to NewDeltaFIFOWithOptions(DeltaFIFOOptions{KeyFunction: keyFunc, KnownObjects: knownObjects})
|
||||
func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO {
|
||||
return NewDeltaFIFOWithOptions(DeltaFIFOOptions{
|
||||
KeyFunction: keyFunc,
|
||||
KnownObjects: knownObjects,
|
||||
})
|
||||
}
|
||||
|
||||
// NewDeltaFIFOWithOptions returns a Queue which can be used to process changes to
|
||||
// items. See also the comment on DeltaFIFO.
|
||||
func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO {
|
||||
if opts.KeyFunction == nil {
|
||||
opts.KeyFunction = MetaNamespaceKeyFunc
|
||||
}
|
||||
|
||||
f := &DeltaFIFO{
|
||||
items: map[string]Deltas{},
|
||||
queue: []string{},
|
||||
keyFunc: keyFunc,
|
||||
knownObjects: knownObjects,
|
||||
keyFunc: opts.KeyFunction,
|
||||
knownObjects: opts.KnownObjects,
|
||||
|
||||
emitDeltaTypeReplaced: opts.EmitDeltaTypeReplaced,
|
||||
}
|
||||
f.cond.L = &f.lock
|
||||
return f
|
||||
}
|
||||
|
||||
// DeltaFIFO is like FIFO, but allows you to process deletes.
|
||||
//
|
||||
// DeltaFIFO is a producer-consumer queue, where a Reflector is
|
||||
// intended to be the producer, and the consumer is whatever calls
|
||||
// the Pop() method.
|
||||
//
|
||||
// DeltaFIFO solves this use case:
|
||||
// * You want to process every object change (delta) at most once.
|
||||
// * When you process an object, you want to see everything
|
||||
// that's happened to it since you last processed it.
|
||||
// * You want to process the deletion of objects.
|
||||
// * You might want to periodically reprocess objects.
|
||||
//
|
||||
// DeltaFIFO's Pop(), Get(), and GetByKey() methods return
|
||||
// interface{} to satisfy the Store/Queue interfaces, but it
|
||||
// will always return an object of type Deltas.
|
||||
//
|
||||
// A note on threading: If you call Pop() in parallel from multiple
|
||||
// threads, you could end up with multiple threads processing slightly
|
||||
// different versions of the same object.
|
||||
//
|
||||
// A note on the KeyLister used by the DeltaFIFO: It's main purpose is
|
||||
// to list keys that are "known", for the purpose of figuring out which
|
||||
// items have been deleted when Replace() or Delete() are called. The deleted
|
||||
// object will be included in the DeleteFinalStateUnknown markers. These objects
|
||||
// could be stale.
|
||||
type DeltaFIFO struct {
|
||||
// lock/cond protects access to 'items' and 'queue'.
|
||||
lock sync.RWMutex
|
||||
cond sync.Cond
|
||||
|
||||
// We depend on the property that items in the set are in
|
||||
// the queue and vice versa, and that all Deltas in this
|
||||
// map have at least one Delta.
|
||||
items map[string]Deltas
|
||||
queue []string
|
||||
|
||||
// populated is true if the first batch of items inserted by Replace() has been populated
|
||||
// or Delete/Add/Update was called first.
|
||||
populated bool
|
||||
// initialPopulationCount is the number of items inserted by the first call of Replace()
|
||||
initialPopulationCount int
|
||||
|
||||
// keyFunc is used to make the key used for queued item
|
||||
// insertion and retrieval, and should be deterministic.
|
||||
keyFunc KeyFunc
|
||||
|
||||
// knownObjects list keys that are "known", for the
|
||||
// purpose of figuring out which items have been deleted
|
||||
// when Replace() or Delete() is called.
|
||||
knownObjects KeyListerGetter
|
||||
|
||||
// Indication the queue is closed.
|
||||
// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
|
||||
// Currently, not used to gate any of CRED operations.
|
||||
closed bool
|
||||
closedLock sync.Mutex
|
||||
}
|
||||
|
||||
var (
|
||||
_ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue
|
||||
)
|
||||
@@ -139,8 +243,8 @@ var (
|
||||
|
||||
// Close the queue.
|
||||
func (f *DeltaFIFO) Close() {
|
||||
f.closedLock.Lock()
|
||||
defer f.closedLock.Unlock()
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
f.closed = true
|
||||
f.cond.Broadcast()
|
||||
}
|
||||
@@ -160,8 +264,8 @@ func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) {
|
||||
return f.keyFunc(obj)
|
||||
}
|
||||
|
||||
// Return true if an Add/Update/Delete/AddIfNotPresent are called first,
|
||||
// or an Update called first but the first batch of items inserted by Replace() has been popped
|
||||
// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first,
|
||||
// or the first batch of items inserted by Replace() has been popped.
|
||||
func (f *DeltaFIFO) HasSynced() bool {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
@@ -185,9 +289,11 @@ func (f *DeltaFIFO) Update(obj interface{}) error {
|
||||
return f.queueActionLocked(Updated, obj)
|
||||
}
|
||||
|
||||
// Delete is just like Add, but makes an Deleted Delta. If the item does not
|
||||
// already exist, it will be ignored. (It may have already been deleted by a
|
||||
// Replace (re-list), for example.
|
||||
// Delete is just like Add, but makes a Deleted Delta. If the given
|
||||
// object does not already exist, it will be ignored. (It may have
|
||||
// already been deleted by a Replace (re-list), for example.) In this
|
||||
// method `f.knownObjects`, if not nil, provides (via GetByKey)
|
||||
// _additional_ objects that are considered to already exist.
|
||||
func (f *DeltaFIFO) Delete(obj interface{}) error {
|
||||
id, err := f.KeyOf(obj)
|
||||
if err != nil {
|
||||
@@ -216,6 +322,7 @@ func (f *DeltaFIFO) Delete(obj interface{}) error {
|
||||
}
|
||||
}
|
||||
|
||||
// exist in items and/or KnownObjects
|
||||
return f.queueActionLocked(Deleted, obj)
|
||||
}
|
||||
|
||||
@@ -266,6 +373,11 @@ func dedupDeltas(deltas Deltas) Deltas {
|
||||
a := &deltas[n-1]
|
||||
b := &deltas[n-2]
|
||||
if out := isDup(a, b); out != nil {
|
||||
// `a` and `b` are duplicates. Only keep the one returned from isDup().
|
||||
// TODO: This extra array allocation and copy seems unnecessary if
|
||||
// all we do to dedup is compare the new delta with the last element
|
||||
// in `items`, which could be done by mutating `items` directly.
|
||||
// Might be worth profiling and investigating if it is safe to optimize.
|
||||
d := append(Deltas{}, deltas[:n-2]...)
|
||||
return append(d, *out)
|
||||
}
|
||||
@@ -295,13 +407,6 @@ func isDeletionDup(a, b *Delta) *Delta {
|
||||
return b
|
||||
}
|
||||
|
||||
// willObjectBeDeletedLocked returns true only if the last delta for the
|
||||
// given object is Delete. Caller must lock first.
|
||||
func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool {
|
||||
deltas := f.items[id]
|
||||
return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted
|
||||
}
|
||||
|
||||
// queueActionLocked appends to the delta list for the object.
|
||||
// Caller must lock first.
|
||||
func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
|
||||
@@ -309,15 +414,8 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
|
||||
if err != nil {
|
||||
return KeyError{obj, err}
|
||||
}
|
||||
|
||||
// If object is supposed to be deleted (last event is Deleted),
|
||||
// then we should ignore Sync events, because it would result in
|
||||
// recreation of this object.
|
||||
if actionType == Sync && f.willObjectBeDeletedLocked(id) {
|
||||
return nil
|
||||
}
|
||||
|
||||
newDeltas := append(f.items[id], Delta{actionType, obj})
|
||||
oldDeltas := f.items[id]
|
||||
newDeltas := append(oldDeltas, Delta{actionType, obj})
|
||||
newDeltas = dedupDeltas(newDeltas)
|
||||
|
||||
if len(newDeltas) > 0 {
|
||||
@@ -327,9 +425,16 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
|
||||
f.items[id] = newDeltas
|
||||
f.cond.Broadcast()
|
||||
} else {
|
||||
// We need to remove this from our map (extra items in the queue are
|
||||
// ignored if they are not in the map).
|
||||
delete(f.items, id)
|
||||
// This never happens, because dedupDeltas never returns an empty list
|
||||
// when given a non-empty list (as it is here).
|
||||
// If somehow it happens anyway, deal with it but complain.
|
||||
if oldDeltas == nil {
|
||||
klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; ignoring", id, oldDeltas, obj)
|
||||
return nil
|
||||
}
|
||||
klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; breaking invariant by storing empty Deltas", id, oldDeltas, obj)
|
||||
f.items[id] = newDeltas
|
||||
return fmt.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; broke DeltaFIFO invariant by storing empty Deltas", id, oldDeltas, obj)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -389,22 +494,24 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err
|
||||
return d, exists, nil
|
||||
}
|
||||
|
||||
// Checks if the queue is closed
|
||||
// IsClosed checks if the queue is closed
|
||||
func (f *DeltaFIFO) IsClosed() bool {
|
||||
f.closedLock.Lock()
|
||||
defer f.closedLock.Unlock()
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
return f.closed
|
||||
}
|
||||
|
||||
// Pop blocks until an item is added to the queue, and then returns it. If
|
||||
// Pop blocks until the queue has some items, and then returns one. If
|
||||
// multiple items are ready, they are returned in the order in which they were
|
||||
// added/updated. The item is removed from the queue (and the store) before it
|
||||
// is returned, so if you don't successfully process it, you need to add it back
|
||||
// with AddIfNotPresent().
|
||||
// process function is called under lock, so it is safe update data structures
|
||||
// process function is called under lock, so it is safe to update data structures
|
||||
// in it that need to be in sync with the queue (e.g. knownKeys). The PopProcessFunc
|
||||
// may return an instance of ErrRequeue with a nested error to indicate the current
|
||||
// item should be requeued (equivalent to calling AddIfNotPresent under the lock).
|
||||
// process should avoid expensive I/O operation so that other queue operations, i.e.
|
||||
// Add() and Get(), won't be blocked for too long.
|
||||
//
|
||||
// Pop returns a 'Deltas', which has a complete list of all the things
|
||||
// that happened to the object (deltas) while it was sitting in the queue.
|
||||
@@ -416,8 +523,8 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
|
||||
// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
|
||||
// When Close() is called, the f.closed is set and the condition is broadcasted.
|
||||
// Which causes this loop to continue and return from the Pop().
|
||||
if f.IsClosed() {
|
||||
return nil, FIFOClosedError
|
||||
if f.closed {
|
||||
return nil, ErrFIFOClosed
|
||||
}
|
||||
|
||||
f.cond.Wait()
|
||||
@@ -429,7 +536,8 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
|
||||
}
|
||||
item, ok := f.items[id]
|
||||
if !ok {
|
||||
// Item may have been deleted subsequently.
|
||||
// This should never happen
|
||||
klog.Errorf("Inconceivable! %q was in f.queue but not f.items; ignoring.", id)
|
||||
continue
|
||||
}
|
||||
delete(f.items, id)
|
||||
@@ -444,22 +552,35 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
|
||||
}
|
||||
}
|
||||
|
||||
// Replace will delete the contents of 'f', using instead the given map.
|
||||
// 'f' takes ownership of the map, you should not reference the map again
|
||||
// after calling this function. f's queue is reset, too; upon return, it
|
||||
// will contain the items in the map, in no particular order.
|
||||
// Replace atomically does two things: (1) it adds the given objects
|
||||
// using the Sync or Replace DeltaType and then (2) it does some deletions.
|
||||
// In particular: for every pre-existing key K that is not the key of
|
||||
// an object in `list` there is the effect of
|
||||
// `Delete(DeletedFinalStateUnknown{K, O})` where O is current object
|
||||
// of K. If `f.knownObjects == nil` then the pre-existing keys are
|
||||
// those in `f.items` and the current object of K is the `.Newest()`
|
||||
// of the Deltas associated with K. Otherwise the pre-existing keys
|
||||
// are those listed by `f.knownObjects` and the current object of K is
|
||||
// what `f.knownObjects.GetByKey(K)` returns.
|
||||
func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
keys := make(sets.String, len(list))
|
||||
|
||||
// keep backwards compat for old clients
|
||||
action := Sync
|
||||
if f.emitDeltaTypeReplaced {
|
||||
action = Replaced
|
||||
}
|
||||
|
||||
// Add Sync/Replaced action for each new item.
|
||||
for _, item := range list {
|
||||
key, err := f.KeyOf(item)
|
||||
if err != nil {
|
||||
return KeyError{item, err}
|
||||
}
|
||||
keys.Insert(key)
|
||||
if err := f.queueActionLocked(Sync, item); err != nil {
|
||||
if err := f.queueActionLocked(action, item); err != nil {
|
||||
return fmt.Errorf("couldn't enqueue object: %v", err)
|
||||
}
|
||||
}
|
||||
@@ -471,6 +592,9 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
|
||||
if keys.Has(k) {
|
||||
continue
|
||||
}
|
||||
// Delete pre-existing items not in the new list.
|
||||
// This could happen if watch deletion event was missed while
|
||||
// disconnected from apiserver.
|
||||
var deletedObj interface{}
|
||||
if n := oldItem.Newest(); n != nil {
|
||||
deletedObj = n.Object
|
||||
@@ -485,7 +609,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
|
||||
f.populated = true
|
||||
// While there shouldn't be any queued deletions in the initial
|
||||
// population of the queue, it's better to be on the safe side.
|
||||
f.initialPopulationCount = len(list) + queuedDeletions
|
||||
f.initialPopulationCount = keys.Len() + queuedDeletions
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -515,13 +639,15 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
|
||||
|
||||
if !f.populated {
|
||||
f.populated = true
|
||||
f.initialPopulationCount = len(list) + queuedDeletions
|
||||
f.initialPopulationCount = keys.Len() + queuedDeletions
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resync will send a sync event for each item
|
||||
// Resync adds, with a Sync type of Delta, every object listed by
|
||||
// `f.knownObjects` whose key is not already queued for processing.
|
||||
// If `f.knownObjects` is `nil` then Resync does nothing.
|
||||
func (f *DeltaFIFO) Resync() error {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
@@ -539,13 +665,6 @@ func (f *DeltaFIFO) Resync() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *DeltaFIFO) syncKey(key string) error {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
return f.syncKeyLocked(key)
|
||||
}
|
||||
|
||||
func (f *DeltaFIFO) syncKeyLocked(key string) error {
|
||||
obj, exists, err := f.knownObjects.GetByKey(key)
|
||||
if err != nil {
|
||||
@@ -587,37 +706,10 @@ type KeyLister interface {
|
||||
|
||||
// A KeyGetter is anything that knows how to get the value stored under a given key.
|
||||
type KeyGetter interface {
|
||||
GetByKey(key string) (interface{}, bool, error)
|
||||
// GetByKey returns the value associated with the key, or sets exists=false.
|
||||
GetByKey(key string) (value interface{}, exists bool, err error)
|
||||
}
|
||||
|
||||
// DeltaType is the type of a change (addition, deletion, etc)
|
||||
type DeltaType string
|
||||
|
||||
const (
|
||||
Added DeltaType = "Added"
|
||||
Updated DeltaType = "Updated"
|
||||
Deleted DeltaType = "Deleted"
|
||||
// The other types are obvious. You'll get Sync deltas when:
|
||||
// * A watch expires/errors out and a new list/watch cycle is started.
|
||||
// * You've turned on periodic syncs.
|
||||
// (Anything that trigger's DeltaFIFO's Replace() method.)
|
||||
Sync DeltaType = "Sync"
|
||||
)
|
||||
|
||||
// Delta is the type stored by a DeltaFIFO. It tells you what change
|
||||
// happened, and the object's state after* that change.
|
||||
//
|
||||
// [*] Unless the change is a deletion, and then you'll get the final
|
||||
// state of the object before it was deleted.
|
||||
type Delta struct {
|
||||
Type DeltaType
|
||||
Object interface{}
|
||||
}
|
||||
|
||||
// Deltas is a list of one or more 'Delta's to an individual object.
|
||||
// The oldest delta is at index 0, the newest delta is the last one.
|
||||
type Deltas []Delta
|
||||
|
||||
// Oldest is a convenience function that returns the oldest delta, or
|
||||
// nil if there are no deltas.
|
||||
func (d Deltas) Oldest() *Delta {
|
||||
@@ -645,10 +737,10 @@ func copyDeltas(d Deltas) Deltas {
|
||||
return d2
|
||||
}
|
||||
|
||||
// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where
|
||||
// an object was deleted but the watch deletion event was missed. In this
|
||||
// case we don't know the final "resting" state of the object, so there's
|
||||
// a chance the included `Obj` is stale.
|
||||
// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where an object
|
||||
// was deleted but the watch deletion event was missed while disconnected from
|
||||
// apiserver. In this case we don't know the final "resting" state of the object, so
|
||||
// there's a chance the included `Obj` is stale.
|
||||
type DeletedFinalStateUnknown struct {
|
||||
Key string
|
||||
Obj interface{}
|
||||
|
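A small usage sketch of the DeltaFIFOOptions introduced above (the knownObjects store and the consumer logic are assumed):

```go
package example

import "k8s.io/client-go/tools/cache"

// consumeDeltas pops one key's accumulated Deltas and applies them. With
// EmitDeltaTypeReplaced set, relists yield Replaced deltas instead of the
// legacy Sync type. In real use a Reflector fills the queue; Pop blocks
// until something arrives.
func consumeDeltas(knownObjects cache.Store) error {
	fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
		KnownObjects:          knownObjects,
		EmitDeltaTypeReplaced: true,
	})

	_, err := fifo.Pop(cache.PopProcessFunc(func(obj interface{}) error {
		for _, d := range obj.(cache.Deltas) {
			switch d.Type {
			case cache.Sync, cache.Replaced, cache.Added, cache.Updated:
				// upsert d.Object into local state
			case cache.Deleted:
				// remove d.Object from local state
			}
		}
		return nil
	}))
	return err
}
```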
55
vendor/k8s.io/client-go/tools/cache/expiration_cache.go
generated
vendored
@@ -21,7 +21,7 @@ import (
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// ExpirationCache implements the store interface
|
||||
@@ -48,14 +48,14 @@ type ExpirationCache struct {
|
||||
// ExpirationPolicy dictates when an object expires. Currently only abstracted out
|
||||
// so unittests don't rely on the system clock.
|
||||
type ExpirationPolicy interface {
|
||||
IsExpired(obj *timestampedEntry) bool
|
||||
IsExpired(obj *TimestampedEntry) bool
|
||||
}
|
||||
|
||||
// TTLPolicy implements a ttl based ExpirationPolicy.
|
||||
type TTLPolicy struct {
|
||||
// >0: Expire entries with an age > ttl
|
||||
// <=0: Don't expire any entry
|
||||
Ttl time.Duration
|
||||
TTL time.Duration
|
||||
|
||||
// Clock used to calculate ttl expiration
|
||||
Clock clock.Clock
|
||||
@@ -63,26 +63,30 @@ type TTLPolicy struct {
|
||||
|
||||
// IsExpired returns true if the given object is older than the ttl, or it can't
|
||||
// determine its age.
|
||||
func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool {
|
||||
return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl
|
||||
func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool {
|
||||
return p.TTL > 0 && p.Clock.Since(obj.Timestamp) > p.TTL
|
||||
}
|
||||
|
||||
// timestampedEntry is the only type allowed in a ExpirationCache.
|
||||
type timestampedEntry struct {
|
||||
obj interface{}
|
||||
timestamp time.Time
|
||||
// TimestampedEntry is the only type allowed in a ExpirationCache.
|
||||
// Keep in mind that it is not safe to share timestamps between computers.
|
||||
// Behavior may be inconsistent if you get a timestamp from the API Server and
|
||||
// use it on the client machine as part of your ExpirationCache.
|
||||
type TimestampedEntry struct {
|
||||
Obj interface{}
|
||||
Timestamp time.Time
|
||||
key string
|
||||
}
|
||||
|
||||
// getTimestampedEntry returns the timestampedEntry stored under the given key.
|
||||
func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) {
|
||||
// getTimestampedEntry returns the TimestampedEntry stored under the given key.
|
||||
func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) {
|
||||
item, _ := c.cacheStorage.Get(key)
|
||||
if tsEntry, ok := item.(*timestampedEntry); ok {
|
||||
if tsEntry, ok := item.(*TimestampedEntry); ok {
|
||||
return tsEntry, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't
|
||||
// getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't
|
||||
// already expired. It holds a write lock across deletion.
|
||||
func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
|
||||
// Prevent all inserts from the time we deem an item as "expired" to when we
|
||||
@@ -95,11 +99,11 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
|
||||
return nil, false
|
||||
}
|
||||
if c.expirationPolicy.IsExpired(timestampedItem) {
|
||||
klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
|
||||
klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj)
|
||||
c.cacheStorage.Delete(key)
|
||||
return nil, false
|
||||
}
|
||||
return timestampedItem.obj, true
|
||||
return timestampedItem.Obj, true
|
||||
}
|
||||
|
||||
// GetByKey returns the item stored under the key, or sets exists=false.
|
||||
@@ -126,10 +130,8 @@ func (c *ExpirationCache) List() []interface{} {
|
||||
|
||||
list := make([]interface{}, 0, len(items))
|
||||
for _, item := range items {
|
||||
obj := item.(*timestampedEntry).obj
|
||||
if key, err := c.keyFunc(obj); err != nil {
|
||||
list = append(list, obj)
|
||||
} else if obj, exists := c.getOrExpire(key); exists {
|
||||
key := item.(*TimestampedEntry).key
|
||||
if obj, exists := c.getOrExpire(key); exists {
|
||||
list = append(list, obj)
|
||||
}
|
||||
}
|
||||
@@ -151,7 +153,7 @@ func (c *ExpirationCache) Add(obj interface{}) error {
|
||||
c.expirationLock.Lock()
|
||||
defer c.expirationLock.Unlock()
|
||||
|
||||
c.cacheStorage.Add(key, ×tampedEntry{obj, c.clock.Now()})
|
||||
c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now(), key})
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -184,7 +186,7 @@ func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) er
|
||||
if err != nil {
|
||||
return KeyError{item, err}
|
||||
}
|
||||
items[key] = ×tampedEntry{item, ts}
|
||||
items[key] = &TimestampedEntry{item, ts, key}
|
||||
}
|
||||
c.expirationLock.Lock()
|
||||
defer c.expirationLock.Unlock()
|
||||
@@ -192,17 +194,22 @@ func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) er
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resync will touch all objects to put them into the processing queue
|
||||
// Resync is a no-op for one of these
|
||||
func (c *ExpirationCache) Resync() error {
|
||||
return c.cacheStorage.Resync()
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewTTLStore creates and returns a ExpirationCache with a TTLPolicy
|
||||
func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
|
||||
return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}})
|
||||
}
|
||||
|
||||
// NewExpirationStore creates and returns a ExpirationCache for a given policy
|
||||
func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store {
|
||||
return &ExpirationCache{
|
||||
cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}),
|
||||
keyFunc: keyFunc,
|
||||
clock: clock.RealClock{},
|
||||
expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}},
|
||||
expirationPolicy: expirationPolicy,
|
||||
}
|
||||
}
|
||||
|
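A brief sketch of the TTL-based ExpirationCache touched above (the stored ConfigMap is made up):

```go
package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

// ttlStoreExample adds an object to a TTL-backed store; once the TTL has
// elapsed, lookups report the entry as absent.
func ttlStoreExample() (bool, error) {
	store := cache.NewTTLStore(cache.MetaNamespaceKeyFunc, 30*time.Second)
	cm := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}
	if err := store.Add(cm); err != nil {
		return false, err
	}
	_, exists, err := store.GetByKey("default/demo") // false after 30s
	return exists, err
}
```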
5
vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go
generated
vendored
@@ -33,16 +33,19 @@ func (c *fakeThreadSafeMap) Delete(key string) {
	}
}

// FakeExpirationPolicy keeps the list for keys which never expires.
type FakeExpirationPolicy struct {
	NeverExpire     sets.String
	RetrieveKeyFunc KeyFunc
}

func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool {
// IsExpired used to check if object is expired.
func (p *FakeExpirationPolicy) IsExpired(obj *TimestampedEntry) bool {
	key, _ := p.RetrieveKeyFunc(obj)
	return !p.NeverExpire.Has(key)
}

// NewFakeExpirationStore creates a new instance for the ExpirationCache.
func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store {
	cacheStorage := NewThreadSafeStore(Indexers{}, Indices{})
	return &ExpirationCache{
4
vendor/k8s.io/client-go/tools/cache/fake_custom_store.go
generated
vendored
@@ -16,7 +16,7 @@ limitations under the License.

package cache

// FakeStore lets you define custom functions for store operations
// FakeCustomStore lets you define custom functions for store operations.
type FakeCustomStore struct {
	AddFunc    func(obj interface{}) error
	UpdateFunc func(obj interface{}) error
@@ -25,7 +25,7 @@ type FakeCustomStore struct {
	ListKeysFunc func() []string
	GetFunc      func(obj interface{}) (item interface{}, exists bool, err error)
	GetByKeyFunc func(key string) (item interface{}, exists bool, err error)
	ReplaceFunc  func(list []interface{}, resourceVerion string) error
	ReplaceFunc  func(list []interface{}, resourceVersion string) error
	ResyncFunc   func() error
}
82
vendor/k8s.io/client-go/tools/cache/fifo.go
generated
vendored
@@ -24,7 +24,7 @@ import (
|
||||
)
|
||||
|
||||
// PopProcessFunc is passed to Pop() method of Queue interface.
|
||||
// It is supposed to process the element popped from the queue.
|
||||
// It is supposed to process the accumulator popped from the queue.
|
||||
type PopProcessFunc func(interface{}) error
|
||||
|
||||
// ErrRequeue may be returned by a PopProcessFunc to safely requeue
|
||||
@@ -34,7 +34,8 @@ type ErrRequeue struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
var FIFOClosedError error = errors.New("DeltaFIFO: manipulating with closed queue")
|
||||
// ErrFIFOClosed used when FIFO is closed
|
||||
var ErrFIFOClosed = errors.New("DeltaFIFO: manipulating with closed queue")
|
||||
|
||||
func (e ErrRequeue) Error() string {
|
||||
if e.Err == nil {
|
||||
@@ -43,30 +44,42 @@ func (e ErrRequeue) Error() string {
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
// Queue is exactly like a Store, but has a Pop() method too.
|
||||
// Queue extends Store with a collection of Store keys to "process".
|
||||
// Every Add, Update, or Delete may put the object's key in that collection.
|
||||
// A Queue has a way to derive the corresponding key given an accumulator.
|
||||
// A Queue can be accessed concurrently from multiple goroutines.
|
||||
// A Queue can be "closed", after which Pop operations return an error.
|
||||
type Queue interface {
|
||||
Store
|
||||
|
||||
// Pop blocks until it has something to process.
|
||||
// It returns the object that was process and the result of processing.
|
||||
// The PopProcessFunc may return an ErrRequeue{...} to indicate the item
|
||||
// should be requeued before releasing the lock on the queue.
|
||||
// Pop blocks until there is at least one key to process or the
|
||||
// Queue is closed. In the latter case Pop returns with an error.
|
||||
// In the former case Pop atomically picks one key to process,
|
||||
// removes that (key, accumulator) association from the Store, and
|
||||
// processes the accumulator. Pop returns the accumulator that
|
||||
// was processed and the result of processing. The PopProcessFunc
|
||||
// may return an ErrRequeue{inner} and in this case Pop will (a)
|
||||
// return that (key, accumulator) association to the Queue as part
|
||||
// of the atomic processing and (b) return the inner error from
|
||||
// Pop.
|
||||
Pop(PopProcessFunc) (interface{}, error)
|
||||
|
||||
// AddIfNotPresent adds a value previously
|
||||
// returned by Pop back into the queue as long
|
||||
// as nothing else (presumably more recent)
|
||||
// has since been added.
|
||||
// AddIfNotPresent puts the given accumulator into the Queue (in
|
||||
// association with the accumulator's key) if and only if that key
|
||||
// is not already associated with a non-empty accumulator.
|
||||
AddIfNotPresent(interface{}) error
|
||||
|
||||
// HasSynced returns true if the first batch of items has been popped
|
||||
// HasSynced returns true if the first batch of keys have all been
|
||||
// popped. The first batch of keys are those of the first Replace
|
||||
// operation if that happened before any Add, AddIfNotPresent,
|
||||
// Update, or Delete; otherwise the first batch is empty.
|
||||
HasSynced() bool
|
||||
|
||||
// Close queue
|
||||
// Close the queue
|
||||
Close()
|
||||
}
|
||||
|
||||
// Helper function for popping from Queue.
|
||||
// Pop is helper function for popping from Queue.
|
||||
// WARNING: Do NOT use this function in non-test code to avoid races
|
||||
// unless you really really really really know what you are doing.
|
||||
func Pop(queue Queue) interface{} {
|
||||
@@ -78,11 +91,16 @@ func Pop(queue Queue) interface{} {
|
||||
return result
|
||||
}
|
||||
|
||||
// FIFO receives adds and updates from a Reflector, and puts them in a queue for
|
||||
// FIFO order processing. If multiple adds/updates of a single item happen while
|
||||
// an item is in the queue before it has been processed, it will only be
|
||||
// processed once, and when it is processed, the most recent version will be
|
||||
// processed. This can't be done with a channel.
|
||||
// FIFO is a Queue in which (a) each accumulator is simply the most
|
||||
// recently provided object and (b) the collection of keys to process
|
||||
// is a FIFO. The accumulators all start out empty, and deleting an
|
||||
// object from its accumulator empties the accumulator. The Resync
|
||||
// operation is a no-op.
|
||||
//
|
||||
// Thus: if multiple adds/updates of a single object happen while that
|
||||
// object's key is in the queue before it has been processed then it
|
||||
// will only be processed once, and when it is processed the most
|
||||
// recent version will be processed. This can't be done with a channel
|
||||
//
|
||||
// FIFO solves this use case:
|
||||
// * You want to process every object (exactly) once.
|
||||
@@ -93,7 +111,7 @@ func Pop(queue Queue) interface{} {
|
||||
type FIFO struct {
|
||||
lock sync.RWMutex
|
||||
cond sync.Cond
|
||||
// We depend on the property that items in the set are in the queue and vice versa.
|
||||
// We depend on the property that every key in `items` is also in `queue`
|
||||
items map[string]interface{}
|
||||
queue []string
|
||||
|
||||
@@ -110,8 +128,7 @@ type FIFO struct {
|
||||
// Indication the queue is closed.
|
||||
// Used to indicate a queue is closed so a control loop can exit when a queue is empty.
|
||||
// Currently, not used to gate any of CRED operations.
|
||||
closed bool
|
||||
closedLock sync.Mutex
|
||||
closed bool
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -120,14 +137,14 @@ var (
|
||||
|
||||
// Close the queue.
|
||||
func (f *FIFO) Close() {
|
||||
f.closedLock.Lock()
|
||||
defer f.closedLock.Unlock()
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
f.closed = true
|
||||
f.cond.Broadcast()
|
||||
}
|
||||
|
||||
// Return true if an Add/Update/Delete/AddIfNotPresent are called first,
|
||||
// or an Update called first but the first batch of items inserted by Replace() has been popped
|
||||
// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first,
|
||||
// or the first batch of items inserted by Replace() has been popped.
|
||||
func (f *FIFO) HasSynced() bool {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
@@ -242,10 +259,10 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
|
||||
return item, exists, nil
|
||||
}
|
||||
|
||||
// Checks if the queue is closed
|
||||
// IsClosed checks if the queue is closed
|
||||
func (f *FIFO) IsClosed() bool {
|
||||
f.closedLock.Lock()
|
||||
defer f.closedLock.Unlock()
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
if f.closed {
|
||||
return true
|
||||
}
|
||||
@@ -266,8 +283,8 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
|
||||
// When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
|
||||
// When Close() is called, the f.closed is set and the condition is broadcasted.
|
||||
// Which causes this loop to continue and return from the Pop().
|
||||
if f.IsClosed() {
|
||||
return nil, FIFOClosedError
|
||||
if f.closed {
|
||||
return nil, ErrFIFOClosed
|
||||
}
|
||||
|
||||
f.cond.Wait()
|
||||
@@ -325,7 +342,8 @@ func (f *FIFO) Replace(list []interface{}, resourceVersion string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Resync will touch all objects to put them into the processing queue
|
||||
// Resync will ensure that every object in the Store has its key in the queue.
|
||||
// This should be a no-op, because that property is maintained by all operations.
|
||||
func (f *FIFO) Resync() error {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
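To make the FIFO semantics described above concrete, a minimal sketch (the Pod object is an assumption):

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

// fifoExample shows that repeated Adds of the same key collapse into one
// queued entry and that Pop hands the consumer the most recent version.
func fifoExample() (interface{}, error) {
	f := cache.NewFIFO(cache.MetaNamespaceKeyFunc)

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}
	_ = f.Add(pod)
	_ = f.Add(pod) // same key: still a single pending item

	return f.Pop(func(obj interface{}) error {
		// Returning cache.ErrRequeue{Err: err} here would put the item back.
		return nil
	})
}
```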
10
vendor/k8s.io/client-go/tools/cache/heap.go
generated
vendored
@@ -28,7 +28,9 @@ const (
|
||||
closedMsg = "heap is closed"
|
||||
)
|
||||
|
||||
// LessFunc is used to compare two objects in the heap.
|
||||
type LessFunc func(interface{}, interface{}) bool
|
||||
|
||||
type heapItem struct {
|
||||
obj interface{} // The object which is stored in the heap.
|
||||
index int // The index of the object's key in the Heap.queue.
|
||||
@@ -158,7 +160,7 @@ func (h *Heap) Add(obj interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Adds all the items in the list to the queue and then signals the condition
|
||||
// BulkAdd adds all the items in the list to the queue and then signals the condition
|
||||
// variable. It is useful when the caller would like to add all of the items
|
||||
// to the queue before consumer starts processing them.
|
||||
func (h *Heap) BulkAdd(list []interface{}) error {
|
||||
@@ -249,11 +251,11 @@ func (h *Heap) Pop() (interface{}, error) {
|
||||
h.cond.Wait()
|
||||
}
|
||||
obj := heap.Pop(h.data)
|
||||
if obj != nil {
|
||||
return obj, nil
|
||||
} else {
|
||||
if obj == nil {
|
||||
return nil, fmt.Errorf("object was removed from heap data")
|
||||
}
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// List returns a list of all the items.
|
||||
|
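A possible usage sketch of the Heap documented above (the ordering function and Pod objects are assumptions):

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// heapExample orders Pods by name; Pop blocks until an item is available
// and then returns the smallest element according to the LessFunc.
func heapExample(pods []*v1.Pod) (interface{}, error) {
	h := cache.NewHeap(cache.MetaNamespaceKeyFunc, func(a, b interface{}) bool {
		return a.(*v1.Pod).Name < b.(*v1.Pod).Name
	})
	for _, p := range pods {
		if err := h.Add(p); err != nil {
			return nil, err
		}
	}
	return h.Pop()
}
```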
32
vendor/k8s.io/client-go/tools/cache/index.go
generated
vendored
@@ -23,17 +23,30 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
// Indexer is a storage interface that lets you list objects using multiple indexing functions
|
||||
// Indexer extends Store with multiple indices and restricts each
|
||||
// accumulator to simply hold the current object (and be empty after
|
||||
// Delete).
|
||||
//
|
||||
// There are three kinds of strings here:
|
||||
// 1. a storage key, as defined in the Store interface,
|
||||
// 2. a name of an index, and
|
||||
// 3. an "indexed value", which is produced by an IndexFunc and
|
||||
// can be a field value or any other string computed from the object.
|
||||
type Indexer interface {
|
||||
Store
|
||||
// Retrieve list of objects that match on the named indexing function
|
||||
// Index returns the stored objects whose set of indexed values
|
||||
// intersects the set of indexed values of the given object, for
|
||||
// the named index
|
||||
Index(indexName string, obj interface{}) ([]interface{}, error)
|
||||
// IndexKeys returns the set of keys that match on the named indexing function.
|
||||
IndexKeys(indexName, indexKey string) ([]string, error)
|
||||
// ListIndexFuncValues returns the list of generated values of an Index func
|
||||
// IndexKeys returns the storage keys of the stored objects whose
|
||||
// set of indexed values for the named index includes the given
|
||||
// indexed value
|
||||
IndexKeys(indexName, indexedValue string) ([]string, error)
|
||||
// ListIndexFuncValues returns all the indexed values of the given index
|
||||
ListIndexFuncValues(indexName string) []string
|
||||
// ByIndex lists object that match on the named indexing function with the exact key
|
||||
ByIndex(indexName, indexKey string) ([]interface{}, error)
|
||||
// ByIndex returns the stored objects whose set of indexed values
|
||||
// for the named index includes the given indexed value
|
||||
ByIndex(indexName, indexedValue string) ([]interface{}, error)
|
||||
// GetIndexer return the indexers
|
||||
GetIndexers() Indexers
|
||||
|
||||
@@ -42,11 +55,11 @@ type Indexer interface {
|
||||
AddIndexers(newIndexers Indexers) error
|
||||
}
|
||||
|
||||
// IndexFunc knows how to provide an indexed value for an object.
|
||||
// IndexFunc knows how to compute the set of indexed values for an object.
|
||||
type IndexFunc func(obj interface{}) ([]string, error)
|
||||
|
||||
// IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns
|
||||
// unique values for every object. This is conversion can create errors when more than one key is found. You
|
||||
// unique values for every object. This conversion can create errors when more than one key is found. You
|
||||
// should prefer to make proper key and index functions.
|
||||
func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc {
|
||||
return func(obj interface{}) (string, error) {
|
||||
@@ -65,6 +78,7 @@ func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc {
|
||||
}
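As a hedged sketch of the adapter described above (the pod-by-node-name index function is only an illustration): an index function that yields exactly one value per object can be reused as a key function.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Index pods by the node they are scheduled to; one value per object.
	byNodeName := cache.IndexFunc(func(obj interface{}) ([]string, error) {
		return []string{obj.(*v1.Pod).Spec.NodeName}, nil
	})

	keyFn := cache.IndexFuncToKeyFuncAdapter(byNodeName)
	key, err := keyFn(&v1.Pod{Spec: v1.PodSpec{NodeName: "node-1"}})
	fmt.Println(key, err) // "node-1" <nil>
}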
|
||||
|
||||
const (
|
||||
// NamespaceIndex is the lookup name for the most common index function, which is to index by the namespace field.
|
||||
NamespaceIndex string = "namespace"
|
||||
)
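A rough usage sketch of the Indexer contract and the namespace index named above (the Pod objects are only an illustration): the storage key is "namespace/name", the index name is "namespace", and the indexed value is the namespace string.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Storage keys are "namespace/name"; the "namespace" index maps each
	// object to its namespace as the indexed value.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})

	_ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"}})
	_ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "db", Namespace: "prod"}})

	// ByIndex returns every stored object whose indexed values include "default".
	objs, _ := indexer.ByIndex(cache.NamespaceIndex, "default")
	for _, o := range objs {
		fmt.Println(o.(*v1.Pod).Name) // "web"
	}
}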
|
||||
|
||||
|
5
vendor/k8s.io/client-go/tools/cache/listers.go
generated
vendored
@@ -17,7 +17,7 @@ limitations under the License.
|
||||
package cache
|
||||
|
||||
import (
|
||||
"k8s.io/klog"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
@@ -30,6 +30,7 @@ import (
|
||||
// AppendFunc is used to add a matching item to whatever list the caller is using
|
||||
type AppendFunc func(interface{})
|
||||
|
||||
// ListAll calls appendFn with each value retrieved from store which matches the selector.
|
||||
func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
|
||||
selectAll := selector.Empty()
|
||||
for _, m := range store.List() {
|
||||
@@ -50,6 +51,7 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListAllByNamespace lists items belonging to the given namespace from an Indexer.
|
||||
func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
|
||||
selectAll := selector.Empty()
|
||||
if namespace == metav1.NamespaceAll {
|
||||
@@ -124,6 +126,7 @@ type GenericNamespaceLister interface {
|
||||
Get(name string) (runtime.Object, error)
|
||||
}
|
||||
|
||||
// NewGenericLister creates a new instance for the genericLister.
|
||||
func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister {
|
||||
return &genericLister{indexer: indexer, resource: resource}
|
||||
}
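A small sketch of how a caller typically uses ListAll with an AppendFunc; the store contents and label here are assumptions for illustration.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

func main() {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	_ = store.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "web", Namespace: "default", Labels: map[string]string{"app": "web"},
	}})

	// Collect every stored object matching the label selector via the AppendFunc.
	var matched []*v1.Pod
	sel := labels.SelectorFromSet(labels.Set{"app": "web"})
	if err := cache.ListAll(store, sel, func(obj interface{}) {
		matched = append(matched, obj.(*v1.Pod))
	}); err != nil {
		panic(err)
	}
	fmt.Println(len(matched)) // 1
}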
|
||||
|
10
vendor/k8s.io/client-go/tools/cache/listwatch.go
generated
vendored
@@ -24,7 +24,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/pager"
|
||||
)
|
||||
|
||||
// Lister is any object that knows how to perform an initial list.
|
||||
@@ -85,7 +84,7 @@ func NewFilteredListWatchFromClient(c Getter, resource string, namespace string,
|
||||
Namespace(namespace).
|
||||
Resource(resource).
|
||||
VersionedParams(&options, metav1.ParameterCodec).
|
||||
Do().
|
||||
Do(context.TODO()).
|
||||
Get()
|
||||
}
|
||||
watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
@@ -95,16 +94,15 @@ func NewFilteredListWatchFromClient(c Getter, resource string, namespace string,
|
||||
Namespace(namespace).
|
||||
Resource(resource).
|
||||
VersionedParams(&options, metav1.ParameterCodec).
|
||||
Watch()
|
||||
Watch(context.TODO())
|
||||
}
|
||||
return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
|
||||
}
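For context, callers usually build a ListWatch through the helper constructors rather than hand-rolling the list and watch funcs. A hedged sketch, assuming an in-cluster rest.Config and the pods resource:

package main

import (
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func newPodListWatch() (*cache.ListWatch, error) {
	// In-cluster config is just one way to obtain a rest.Config; adjust as needed.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	// List and watch pods in the "default" namespace; the field selector keeps everything.
	return cache.NewListWatchFromClient(
		cs.CoreV1().RESTClient(), "pods", "default", fields.Everything(),
	), nil
}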
|
||||
|
||||
// List a set of apiserver resources
|
||||
func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
|
||||
if !lw.DisableChunking {
|
||||
return pager.New(pager.SimplePageFunc(lw.ListFunc)).List(context.TODO(), options)
|
||||
}
|
||||
// ListWatch is used in Reflector, which already supports pagination.
|
||||
// Don't paginate here to avoid duplication.
|
||||
return lw.ListFunc(options)
|
||||
}
|
||||
|
||||
|
3
vendor/k8s.io/client-go/tools/cache/mutation_cache.go
generated
vendored
@@ -22,7 +22,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -42,6 +42,7 @@ type MutationCache interface {
|
||||
Mutation(interface{})
|
||||
}
|
||||
|
||||
// ResourceVersionComparator is able to compare object versions.
|
||||
type ResourceVersionComparator interface {
|
||||
CompareResourceVersion(lhs, rhs runtime.Object) int
|
||||
}
|
||||
|
58
vendor/k8s.io/client-go/tools/cache/mutation_detector.go
generated
vendored
@@ -24,7 +24,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/diff"
|
||||
@@ -36,17 +36,22 @@ func init() {
|
||||
mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR"))
|
||||
}
|
||||
|
||||
type CacheMutationDetector interface {
|
||||
// MutationDetector is able to monitor objects for mutation within a limited window of time
|
||||
type MutationDetector interface {
|
||||
// AddObject adds the given object to the set being monitored for a while from now
|
||||
AddObject(obj interface{})
|
||||
|
||||
// Run starts the monitoring and does not return until the monitoring is stopped.
|
||||
Run(stopCh <-chan struct{})
|
||||
}
|
||||
|
||||
func NewCacheMutationDetector(name string) CacheMutationDetector {
|
||||
// NewCacheMutationDetector creates a new instance for the defaultCacheMutationDetector.
|
||||
func NewCacheMutationDetector(name string) MutationDetector {
|
||||
if !mutationDetectionEnabled {
|
||||
return dummyMutationDetector{}
|
||||
}
|
||||
klog.Warningln("Mutation detector is enabled, this will result in memory leakage.")
|
||||
return &defaultCacheMutationDetector{name: name, period: 1 * time.Second}
|
||||
return &defaultCacheMutationDetector{name: name, period: 1 * time.Second, retainDuration: 2 * time.Minute}
|
||||
}
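A rough sketch of how the detector is used; the informer name and Pod object are illustrative only, and the detector only does real work when KUBE_CACHE_MUTATION_DETECTOR is set in the process environment before startup (the flag is read once at package init).

package main

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Returns a no-op detector unless KUBE_CACHE_MUTATION_DETECTOR was truthy
	// in the environment when the process started.
	d := cache.NewCacheMutationDetector("example-informer")

	stop := make(chan struct{})
	defer close(stop)
	go d.Run(stop)

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "web"}}
	d.AddObject(pod) // the detector keeps a deep copy for later comparison

	// Mutating a cached object after handing it to the cache is the bug this
	// catches; with detection enabled the comparison loop will panic.
	pod.Labels = map[string]string{"oops": "mutated"}

	time.Sleep(3 * time.Second) // give the 1s comparison loop a chance to run
}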
|
||||
|
||||
type dummyMutationDetector struct{}
|
||||
@@ -63,9 +68,19 @@ type defaultCacheMutationDetector struct {
|
||||
name string
|
||||
period time.Duration
|
||||
|
||||
lock sync.Mutex
|
||||
// compareLock ensures only a single call to CompareObjects runs at a time
|
||||
compareObjectsLock sync.Mutex
|
||||
|
||||
// addLock guards addedObjs between AddObject and CompareObjects
|
||||
addedObjsLock sync.Mutex
|
||||
addedObjs []cacheObj
|
||||
|
||||
cachedObjs []cacheObj
|
||||
|
||||
retainDuration time.Duration
|
||||
lastRotated time.Time
|
||||
retainedCachedObjs []cacheObj
|
||||
|
||||
// failureFunc is injectable for unit testing. If you don't have it, the process will panic.
|
||||
// This panic is intentional, since turning on this detection indicates you want a strong
|
||||
// failure signal. This failure is effectively a p0 bug and you can't trust process results
|
||||
@@ -82,6 +97,14 @@ type cacheObj struct {
|
||||
func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) {
|
||||
// we DON'T want protection from panics. If we're running this code, we want to die
|
||||
for {
|
||||
if d.lastRotated.IsZero() {
|
||||
d.lastRotated = time.Now()
|
||||
} else if time.Now().Sub(d.lastRotated) > d.retainDuration {
|
||||
d.retainedCachedObjs = d.cachedObjs
|
||||
d.cachedObjs = nil
|
||||
d.lastRotated = time.Now()
|
||||
}
|
||||
|
||||
d.CompareObjects()
|
||||
|
||||
select {
|
||||
@@ -101,20 +124,33 @@ func (d *defaultCacheMutationDetector) AddObject(obj interface{}) {
|
||||
if obj, ok := obj.(runtime.Object); ok {
|
||||
copiedObj := obj.DeepCopyObject()
|
||||
|
||||
d.lock.Lock()
|
||||
defer d.lock.Unlock()
|
||||
d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj})
|
||||
d.addedObjsLock.Lock()
|
||||
defer d.addedObjsLock.Unlock()
|
||||
d.addedObjs = append(d.addedObjs, cacheObj{cached: obj, copied: copiedObj})
|
||||
}
|
||||
}
|
||||
|
||||
func (d *defaultCacheMutationDetector) CompareObjects() {
|
||||
d.lock.Lock()
|
||||
defer d.lock.Unlock()
|
||||
d.compareObjectsLock.Lock()
|
||||
defer d.compareObjectsLock.Unlock()
|
||||
|
||||
// move addedObjs into cachedObjs under lock
|
||||
// this keeps the critical section small to avoid blocking AddObject while we compare cachedObjs
|
||||
d.addedObjsLock.Lock()
|
||||
d.cachedObjs = append(d.cachedObjs, d.addedObjs...)
|
||||
d.addedObjs = nil
|
||||
d.addedObjsLock.Unlock()
|
||||
|
||||
altered := false
|
||||
for i, obj := range d.cachedObjs {
|
||||
if !reflect.DeepEqual(obj.cached, obj.copied) {
|
||||
fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectDiff(obj.cached, obj.copied))
|
||||
fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied))
|
||||
altered = true
|
||||
}
|
||||
}
|
||||
for i, obj := range d.retainedCachedObjs {
|
||||
if !reflect.DeepEqual(obj.cached, obj.copied) {
|
||||
fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied))
|
||||
altered = true
|
||||
}
|
||||
}
|
||||
|
368
vendor/k8s.io/client-go/tools/cache/reflector.go
generated
vendored
@@ -17,57 +17,126 @@ limitations under the License.
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
"k8s.io/apimachinery/pkg/util/naming"
|
||||
utilnet "k8s.io/apimachinery/pkg/util/net"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/client-go/tools/pager"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/utils/trace"
|
||||
)
|
||||
|
||||
const defaultExpectedTypeName = "<unspecified>"
|
||||
|
||||
// Reflector watches a specified resource and causes all changes to be reflected in the given store.
|
||||
type Reflector struct {
|
||||
// name identifies this reflector. By default it will be a file:line if possible.
|
||||
name string
|
||||
// metrics tracks basic metric information about the reflector
|
||||
metrics *reflectorMetrics
|
||||
|
||||
// The type of object we expect to place in the store.
|
||||
// The name of the type we expect to place in the store. The name
|
||||
// will be the stringification of expectedGVK if provided, and the
|
||||
// stringification of expectedType otherwise. It is for display
|
||||
// only, and should not be used for parsing or comparison.
|
||||
expectedTypeName string
|
||||
// An example object of the type we expect to place in the store.
|
||||
// Only the type needs to be right, except that when that is
|
||||
// `unstructured.Unstructured` the object's `"apiVersion"` and
|
||||
// `"kind"` must also be right.
|
||||
expectedType reflect.Type
|
||||
// The GVK of the object we expect to place in the store if unstructured.
|
||||
expectedGVK *schema.GroupVersionKind
|
||||
// The destination to sync up with the watch source
|
||||
store Store
|
||||
// listerWatcher is used to perform lists and watches.
|
||||
listerWatcher ListerWatcher
|
||||
// period controls timing between one watch ending and
|
||||
// the beginning of the next one.
|
||||
period time.Duration
|
||||
|
||||
// backoff manages backoff of ListWatch
|
||||
backoffManager wait.BackoffManager
|
||||
// initConnBackoffManager manages backoff of the initial connection with the Watch call of ListAndWatch.
|
||||
initConnBackoffManager wait.BackoffManager
|
||||
|
||||
resyncPeriod time.Duration
|
||||
// ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked
|
||||
ShouldResync func() bool
|
||||
// clock allows tests to manipulate time
|
||||
clock clock.Clock
|
||||
// paginatedResult defines whether pagination should be forced for list calls.
|
||||
// It is set based on the result of the initial list call.
|
||||
paginatedResult bool
|
||||
// lastSyncResourceVersion is the resource version token last
|
||||
// observed when doing a sync with the underlying store
|
||||
// it is thread safe, but not synchronized with the underlying store
|
||||
lastSyncResourceVersion string
|
||||
// isLastSyncResourceVersionUnavailable is true if the previous list or watch request with
|
||||
// lastSyncResourceVersion failed with an "expired" or "too large resource version" error.
|
||||
isLastSyncResourceVersionUnavailable bool
|
||||
// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
|
||||
lastSyncResourceVersionMutex sync.RWMutex
|
||||
// WatchListPageSize is the requested chunk size of initial and resync watch lists.
|
||||
// If unset, for consistent reads (RV="") or reads that opt-into arbitrarily old data
|
||||
// (RV="0") it will default to pager.PageSize, for the rest (RV != "" && RV != "0")
|
||||
// it will turn off pagination to allow serving them from watch cache.
|
||||
// NOTE: It should be used carefully as paginated lists are always served directly from
|
||||
// etcd, which is significantly less efficient and may lead to serious performance and
|
||||
// scalability problems.
|
||||
WatchListPageSize int64
|
||||
// Called whenever the ListAndWatch drops the connection with an error.
|
||||
watchErrorHandler WatchErrorHandler
|
||||
}
|
||||
|
||||
// ResourceVersionUpdater is an interface that allows store implementation to
|
||||
// track the current resource version of the reflector. This is especially
|
||||
// important if storage bookmarks are enabled.
|
||||
type ResourceVersionUpdater interface {
|
||||
// UpdateResourceVersion is called each time current resource version of the reflector
|
||||
// is updated.
|
||||
UpdateResourceVersion(resourceVersion string)
|
||||
}
|
||||
|
||||
// The WatchErrorHandler is called whenever ListAndWatch drops the
|
||||
// connection with an error. After calling this handler, the informer
|
||||
// will backoff and retry.
|
||||
//
|
||||
// The default implementation looks at the error type and tries to log
|
||||
// the error message at an appropriate level.
|
||||
//
|
||||
// Implementations of this handler may display the error message in other
|
||||
// ways. Implementations should return quickly - any expensive processing
|
||||
// should be offloaded.
|
||||
type WatchErrorHandler func(r *Reflector, err error)
|
||||
|
||||
// DefaultWatchErrorHandler is the default implementation of WatchErrorHandler
|
||||
func DefaultWatchErrorHandler(r *Reflector, err error) {
|
||||
switch {
|
||||
case isExpiredError(err):
|
||||
// Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
|
||||
// has a semantic that it returns data at least as fresh as provided RV.
|
||||
// So first try to LIST with setting RV to resource version of last observed object.
|
||||
klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
|
||||
case err == io.EOF:
|
||||
// watch closed normally
|
||||
case err == io.ErrUnexpectedEOF:
|
||||
klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err)
|
||||
default:
|
||||
utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err))
|
||||
}
|
||||
}
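A hedged sketch of plugging in a custom handler while keeping the default behavior; the counter and the assumption that the informer has not been started yet are illustrative only.

package main

import (
	"sync/atomic"

	"k8s.io/client-go/tools/cache"
)

// installCountingWatchErrorHandler wraps the default handler with a failure counter.
// The informer is assumed to be a SharedInformer that has not been started yet.
func installCountingWatchErrorHandler(informer cache.SharedInformer, failures *int64) error {
	return informer.SetWatchErrorHandler(func(r *cache.Reflector, err error) {
		atomic.AddInt64(failures, 1)
		// Keep the default logging behavior; handlers must return quickly.
		cache.DefaultWatchErrorHandler(r, err)
	})
}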
|
||||
|
||||
var (
|
||||
@@ -79,62 +148,87 @@ var (
|
||||
// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector
|
||||
// The indexer is configured to key on namespace
|
||||
func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) {
|
||||
indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{"namespace": MetaNamespaceIndexFunc})
|
||||
indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc})
|
||||
reflector = NewReflector(lw, expectedType, indexer, resyncPeriod)
|
||||
return indexer, reflector
|
||||
}
|
||||
|
||||
// NewReflector creates a new Reflector object which will keep the given store up to
|
||||
// date with the server's contents for the given resource. Reflector promises to
|
||||
// only put things in the store that have the type of expectedType, unless expectedType
|
||||
// is nil. If resyncPeriod is non-zero, then lists will be executed after every
|
||||
// resyncPeriod, so that you can use reflectors to periodically process everything as
|
||||
// well as incrementally processing the things that change.
|
||||
// NewReflector creates a new Reflector object which will keep the
|
||||
// given store up to date with the server's contents for the given
|
||||
// resource. Reflector promises to only put things in the store that
|
||||
// have the type of expectedType, unless expectedType is nil. If
|
||||
// resyncPeriod is non-zero, then the reflector will periodically
|
||||
// consult its ShouldResync function to determine whether to invoke
|
||||
// the Store's Resync operation; `ShouldResync==nil` means always
|
||||
// "yes". This enables you to use reflectors to periodically process
|
||||
// everything as well as incrementally processing the things that
|
||||
// change.
|
||||
func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
|
||||
return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod)
|
||||
}
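To make the constructor's contract concrete, here is a hedged sketch of wiring a Reflector to a plain Store; the in-cluster config, namespace, and resync interval are assumptions for illustration.

package main

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	lw := cache.NewListWatchFromClient(cs.CoreV1().RESTClient(), "pods", "default", fields.Everything())
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)

	// Consider a resync every 30 seconds in addition to watching for changes.
	r := cache.NewReflector(lw, &v1.Pod{}, store, 30*time.Second)

	stop := make(chan struct{})
	defer close(stop)
	r.Run(stop) // blocks until stop is closed
}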
|
||||
|
||||
// NewNamedReflector same as NewReflector, but with a specified name for logging
|
||||
func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
|
||||
realClock := &clock.RealClock{}
|
||||
r := &Reflector{
|
||||
name: name,
|
||||
listerWatcher: lw,
|
||||
store: store,
|
||||
expectedType: reflect.TypeOf(expectedType),
|
||||
period: time.Second,
|
||||
resyncPeriod: resyncPeriod,
|
||||
clock: &clock.RealClock{},
|
||||
// We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when
|
||||
// API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is
|
||||
// 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff.
|
||||
backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock),
|
||||
initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock),
|
||||
resyncPeriod: resyncPeriod,
|
||||
clock: realClock,
|
||||
watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler),
|
||||
}
|
||||
r.setExpectedType(expectedType)
|
||||
return r
|
||||
}
|
||||
|
||||
func makeValidPrometheusMetricLabel(in string) string {
|
||||
// this isn't perfect, but it removes our common characters
|
||||
return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in)
|
||||
func (r *Reflector) setExpectedType(expectedType interface{}) {
|
||||
r.expectedType = reflect.TypeOf(expectedType)
|
||||
if r.expectedType == nil {
|
||||
r.expectedTypeName = defaultExpectedTypeName
|
||||
return
|
||||
}
|
||||
|
||||
r.expectedTypeName = r.expectedType.String()
|
||||
|
||||
if obj, ok := expectedType.(*unstructured.Unstructured); ok {
|
||||
// Use gvk to check that watch event objects are of the desired type.
|
||||
gvk := obj.GroupVersionKind()
|
||||
if gvk.Empty() {
|
||||
klog.V(4).Infof("Reflector from %s configured with expectedType of *unstructured.Unstructured with empty GroupVersionKind.", r.name)
|
||||
return
|
||||
}
|
||||
r.expectedGVK = &gvk
|
||||
r.expectedTypeName = gvk.String()
|
||||
}
|
||||
}
|
||||
|
||||
// internalPackages are packages that are ignored when creating a default reflector name. These packages are in the common
|
||||
// call chains to NewReflector, so they'd be low entropy names for reflectors
|
||||
var internalPackages = []string{"client-go/tools/cache/"}
|
||||
|
||||
// Run starts a watch and handles watch events. Will restart the watch if it is closed.
|
||||
// Run repeatedly uses the reflector's ListAndWatch to fetch all the
|
||||
// objects and subsequent deltas.
|
||||
// Run will exit when stopCh is closed.
|
||||
func (r *Reflector) Run(stopCh <-chan struct{}) {
|
||||
klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
|
||||
wait.Until(func() {
|
||||
klog.V(2).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
|
||||
wait.BackoffUntil(func() {
|
||||
if err := r.ListAndWatch(stopCh); err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
r.watchErrorHandler(r, err)
|
||||
}
|
||||
}, r.period, stopCh)
|
||||
}, r.backoffManager, true, stopCh)
|
||||
klog.V(2).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
|
||||
}
|
||||
|
||||
var (
|
||||
// nothing will ever be sent down this channel
|
||||
neverExitWatch <-chan time.Time = make(chan time.Time)
|
||||
|
||||
// Used to indicate that watching stopped so that a resync could happen.
|
||||
errorResyncRequested = errors.New("resync channel fired")
|
||||
|
||||
// Used to indicate that watching stopped because of a signal from the stop
|
||||
// channel passed in from a client of the reflector.
|
||||
errorStopRequested = errors.New("Stop requested")
|
||||
@@ -158,18 +252,16 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
|
||||
// and then use the resource version to watch.
|
||||
// It returns error if ListAndWatch didn't even try to initialize watch.
|
||||
func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
|
||||
klog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
|
||||
klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name)
|
||||
var resourceVersion string
|
||||
|
||||
// Explicitly set "0" as resource version - it's fine for the List()
|
||||
// to be served from cache and potentially be delayed relative to
|
||||
// etcd contents. Reflector framework will catch up via Watch() eventually.
|
||||
options := metav1.ListOptions{ResourceVersion: "0"}
|
||||
options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}
|
||||
|
||||
if err := func() error {
|
||||
initTrace := trace.New("Reflector " + r.name + " ListAndWatch")
|
||||
initTrace := trace.New("Reflector ListAndWatch", trace.Field{"name", r.name})
|
||||
defer initTrace.LogIfLong(10 * time.Second)
|
||||
var list runtime.Object
|
||||
var paginatedResult bool
|
||||
var err error
|
||||
listCh := make(chan struct{}, 1)
|
||||
panicCh := make(chan interface{}, 1)
|
||||
@@ -179,7 +271,45 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
|
||||
panicCh <- r
|
||||
}
|
||||
}()
|
||||
list, err = r.listerWatcher.List(options)
|
||||
// Attempt to gather list in chunks, if supported by listerWatcher, if not, the first
|
||||
// list request will return the full response.
|
||||
pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
|
||||
return r.listerWatcher.List(opts)
|
||||
}))
|
||||
switch {
|
||||
case r.WatchListPageSize != 0:
|
||||
pager.PageSize = r.WatchListPageSize
|
||||
case r.paginatedResult:
|
||||
// We got a paginated result initially. Assume this resource and server honor
|
||||
// paging requests (i.e. watch cache is probably disabled) and leave the default
|
||||
// pager size set.
|
||||
case options.ResourceVersion != "" && options.ResourceVersion != "0":
|
||||
// User didn't explicitly request pagination.
|
||||
//
|
||||
// With ResourceVersion != "", we have a possibility to list from watch cache,
|
||||
// but we do that (for ResourceVersion != "0") only if Limit is unset.
|
||||
// To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly
|
||||
// switch off pagination to force listing from watch cache (if enabled).
|
||||
// With the existing semantic of RV (result is at least as fresh as provided RV),
|
||||
// this is correct and doesn't lead to going back in time.
|
||||
//
|
||||
// We also don't turn off pagination for ResourceVersion="0", since watch cache
|
||||
// is ignoring Limit in that case anyway, and if watch cache is not enabled
|
||||
// we don't introduce regression.
|
||||
pager.PageSize = 0
|
||||
}
|
||||
|
||||
list, paginatedResult, err = pager.List(context.Background(), options)
|
||||
if isExpiredError(err) || isTooLargeResourceVersionError(err) {
|
||||
r.setIsLastSyncResourceVersionUnavailable(true)
|
||||
// Retry immediately if the resource version used to list is unavailable.
|
||||
// The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
|
||||
// continuation pages, but the pager might not be enabled, the full list might fail because the
|
||||
// resource version it is listing at is expired or the cache may not yet be synced to the provided
|
||||
// resource version. So we need to fallback to resourceVersion="" in all to recover and ensure
|
||||
// the reflector makes forward progress.
|
||||
list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
|
||||
}
|
||||
close(listCh)
|
||||
}()
|
||||
select {
|
||||
@@ -190,22 +320,38 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
|
||||
case <-listCh:
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
|
||||
return fmt.Errorf("failed to list %v: %v", r.expectedTypeName, err)
|
||||
}
|
||||
|
||||
// We check if the list was paginated and if so set the paginatedResult based on that.
|
||||
// However, we want to do that only for the initial list (which is the only case
|
||||
// when we set ResourceVersion="0"). The reasoning behind it is that later, in some
|
||||
// situations we may force listing directly from etcd (by setting ResourceVersion="")
|
||||
// which will return paginated result, even if watch cache is enabled. However, in
|
||||
// that case, we still want to prefer sending requests to watch cache if possible.
|
||||
//
|
||||
// Paginated result returned for request with ResourceVersion="0" mean that watch
|
||||
// cache is disabled and there are a lot of objects of a given type. In such case,
|
||||
// there is no need to prefer listing from watch cache.
|
||||
if options.ResourceVersion == "0" && paginatedResult {
|
||||
r.paginatedResult = true
|
||||
}
|
||||
|
||||
r.setIsLastSyncResourceVersionUnavailable(false) // list was successful
|
||||
initTrace.Step("Objects listed")
|
||||
listMetaInterface, err := meta.ListAccessor(list)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
|
||||
return fmt.Errorf("unable to understand list result %#v: %v", list, err)
|
||||
}
|
||||
resourceVersion = listMetaInterface.GetResourceVersion()
|
||||
initTrace.Step("Resource version extracted")
|
||||
items, err := meta.ExtractList(list)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
|
||||
return fmt.Errorf("unable to understand list result %#v (%v)", list, err)
|
||||
}
|
||||
initTrace.Step("Objects extracted")
|
||||
if err := r.syncWith(items, resourceVersion); err != nil {
|
||||
return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
|
||||
return fmt.Errorf("unable to sync list result: %v", err)
|
||||
}
|
||||
initTrace.Step("SyncWith done")
|
||||
r.setLastSyncResourceVersion(resourceVersion)
|
||||
@@ -257,36 +403,38 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
|
||||
// We want to avoid situations of hanging watchers. Stop any watchers that do not
|
||||
// receive any events within the timeout window.
|
||||
TimeoutSeconds: &timeoutSeconds,
|
||||
// To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks.
|
||||
// Reflector doesn't assume bookmarks are returned at all (if the server does not support
|
||||
// watch bookmarks, it will ignore this field).
|
||||
AllowWatchBookmarks: true,
|
||||
}
|
||||
|
||||
// start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent
|
||||
start := r.clock.Now()
|
||||
w, err := r.listerWatcher.Watch(options)
|
||||
if err != nil {
|
||||
switch err {
|
||||
case io.EOF:
|
||||
// watch closed normally
|
||||
case io.ErrUnexpectedEOF:
|
||||
klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
|
||||
default:
|
||||
utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err))
|
||||
}
|
||||
// If this is "connection refused" error, it means that most likely apiserver is not responsive.
|
||||
// It doesn't make sense to re-list all objects because most likely we will be able to restart
|
||||
// watch where we ended.
|
||||
// If that's the case wait and resend watch request.
|
||||
if urlError, ok := err.(*url.Error); ok {
|
||||
if opError, ok := urlError.Err.(*net.OpError); ok {
|
||||
if errno, ok := opError.Err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED {
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// If that's the case begin exponentially backing off and resend watch request.
|
||||
if utilnet.IsConnectionRefused(err) {
|
||||
<-r.initConnBackoffManager.Backoff().C()
|
||||
continue
|
||||
}
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
|
||||
if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
|
||||
if err := r.watchHandler(start, w, &resourceVersion, resyncerrc, stopCh); err != nil {
|
||||
if err != errorStopRequested {
|
||||
klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
|
||||
switch {
|
||||
case isExpiredError(err):
|
||||
// Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
|
||||
// has a semantic that it returns data at least as fresh as provided RV.
|
||||
// So first try to LIST with setting RV to resource version of last observed object.
|
||||
klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
|
||||
default:
|
||||
klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -303,8 +451,7 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err
|
||||
}
|
||||
|
||||
// watchHandler watches w and keeps *resourceVersion up to date.
|
||||
func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
|
||||
start := r.clock.Now()
|
||||
func (r *Reflector) watchHandler(start time.Time, w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
|
||||
eventCount := 0
|
||||
|
||||
// Stopping the watcher should be idempotent and if we return from this function there's no way
|
||||
@@ -323,11 +470,19 @@ loop:
|
||||
break loop
|
||||
}
|
||||
if event.Type == watch.Error {
|
||||
return apierrs.FromObject(event.Object)
|
||||
return apierrors.FromObject(event.Object)
|
||||
}
|
||||
if e, a := r.expectedType, reflect.TypeOf(event.Object); e != nil && e != a {
|
||||
utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
|
||||
continue
|
||||
if r.expectedType != nil {
|
||||
if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a {
|
||||
utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", r.name, e, a))
|
||||
continue
|
||||
}
|
||||
}
|
||||
if r.expectedGVK != nil {
|
||||
if e, a := *r.expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a {
|
||||
utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", r.name, e, a))
|
||||
continue
|
||||
}
|
||||
}
|
||||
meta, err := meta.Accessor(event.Object)
|
||||
if err != nil {
|
||||
@@ -354,20 +509,25 @@ loop:
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err))
|
||||
}
|
||||
case watch.Bookmark:
|
||||
// A `Bookmark` means watch has synced here, just update the resourceVersion
|
||||
default:
|
||||
utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event))
|
||||
}
|
||||
*resourceVersion = newResourceVersion
|
||||
r.setLastSyncResourceVersion(newResourceVersion)
|
||||
if rvu, ok := r.store.(ResourceVersionUpdater); ok {
|
||||
rvu.UpdateResourceVersion(newResourceVersion)
|
||||
}
|
||||
eventCount++
|
||||
}
|
||||
}
|
||||
|
||||
watchDuration := r.clock.Now().Sub(start)
|
||||
watchDuration := r.clock.Since(start)
|
||||
if watchDuration < 1*time.Second && eventCount == 0 {
|
||||
return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
|
||||
}
|
||||
klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
|
||||
klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedTypeName, eventCount)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -384,3 +544,67 @@ func (r *Reflector) setLastSyncResourceVersion(v string) {
|
||||
defer r.lastSyncResourceVersionMutex.Unlock()
|
||||
r.lastSyncResourceVersion = v
|
||||
}
|
||||
|
||||
// relistResourceVersion determines the resource version the reflector should list or relist from.
|
||||
// Returns either the lastSyncResourceVersion so that this reflector will relist with a resource
|
||||
// versions no older than has already been observed in relist results or watch events, or, if the last relist resulted
|
||||
// in an HTTP 410 (Gone) status code, returns "" so that the relist will use the latest resource version available in
|
||||
// etcd via a quorum read.
|
||||
func (r *Reflector) relistResourceVersion() string {
|
||||
r.lastSyncResourceVersionMutex.RLock()
|
||||
defer r.lastSyncResourceVersionMutex.RUnlock()
|
||||
|
||||
if r.isLastSyncResourceVersionUnavailable {
|
||||
// Since this reflector makes paginated list requests, and all paginated list requests skip the watch cache
|
||||
// if the lastSyncResourceVersion is unavailable, we set ResourceVersion="" and list again to re-establish reflector
|
||||
// to the latest available ResourceVersion, using a consistent read from etcd.
|
||||
return ""
|
||||
}
|
||||
if r.lastSyncResourceVersion == "" {
|
||||
// For performance reasons, initial list performed by reflector uses "0" as resource version to allow it to
|
||||
// be served from the watch cache if it is enabled.
|
||||
return "0"
|
||||
}
|
||||
return r.lastSyncResourceVersion
|
||||
}
|
||||
|
||||
// setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned
|
||||
// "expired" or "too large resource version" error.
|
||||
func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) {
|
||||
r.lastSyncResourceVersionMutex.Lock()
|
||||
defer r.lastSyncResourceVersionMutex.Unlock()
|
||||
r.isLastSyncResourceVersionUnavailable = isUnavailable
|
||||
}
|
||||
|
||||
func isExpiredError(err error) bool {
|
||||
// In Kubernetes 1.17 and earlier, the api server returns both apierrors.StatusReasonExpired and
|
||||
// apierrors.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent
|
||||
// and always returns apierrors.StatusReasonExpired. For backward compatibility we can only remove the apierrors.IsGone
|
||||
// check when we fully drop support for Kubernetes 1.17 servers from reflectors.
|
||||
return apierrors.IsResourceExpired(err) || apierrors.IsGone(err)
|
||||
}
|
||||
|
||||
func isTooLargeResourceVersionError(err error) bool {
|
||||
if apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) {
|
||||
return true
|
||||
}
|
||||
// In Kubernetes 1.17.0-1.18.5, the api server doesn't set the error status cause to
|
||||
// metav1.CauseTypeResourceVersionTooLarge to indicate that the requested minimum resource
|
||||
// version is larger than the largest currently available resource version. To ensure backward
|
||||
// compatibility with these server versions we also need to detect the error based on the content
|
||||
// of the error message field.
|
||||
if !apierrors.IsTimeout(err) {
|
||||
return false
|
||||
}
|
||||
apierr, ok := err.(apierrors.APIStatus)
|
||||
if !ok || apierr == nil || apierr.Status().Details == nil {
|
||||
return false
|
||||
}
|
||||
for _, cause := range apierr.Status().Details.Causes {
|
||||
// Matches the message returned by api server 1.17.0-1.18.5 for this error condition
|
||||
if cause.Message == "Too large resource version" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
30
vendor/k8s.io/client-go/tools/cache/reflector_metrics.go
generated
vendored
@@ -47,19 +47,6 @@ func (noopMetric) Dec() {}
|
||||
func (noopMetric) Observe(float64) {}
|
||||
func (noopMetric) Set(float64) {}
|
||||
|
||||
type reflectorMetrics struct {
|
||||
numberOfLists CounterMetric
|
||||
listDuration SummaryMetric
|
||||
numberOfItemsInList SummaryMetric
|
||||
|
||||
numberOfWatches CounterMetric
|
||||
numberOfShortWatches CounterMetric
|
||||
watchDuration SummaryMetric
|
||||
numberOfItemsInWatch SummaryMetric
|
||||
|
||||
lastResourceVersion GaugeMetric
|
||||
}
|
||||
|
||||
// MetricsProvider generates various metrics used by the reflector.
|
||||
type MetricsProvider interface {
|
||||
NewListsMetric(name string) CounterMetric
|
||||
@@ -94,23 +81,6 @@ var metricsFactory = struct {
|
||||
metricsProvider: noopMetricsProvider{},
|
||||
}
|
||||
|
||||
func newReflectorMetrics(name string) *reflectorMetrics {
|
||||
var ret *reflectorMetrics
|
||||
if len(name) == 0 {
|
||||
return ret
|
||||
}
|
||||
return &reflectorMetrics{
|
||||
numberOfLists: metricsFactory.metricsProvider.NewListsMetric(name),
|
||||
listDuration: metricsFactory.metricsProvider.NewListDurationMetric(name),
|
||||
numberOfItemsInList: metricsFactory.metricsProvider.NewItemsInListMetric(name),
|
||||
numberOfWatches: metricsFactory.metricsProvider.NewWatchesMetric(name),
|
||||
numberOfShortWatches: metricsFactory.metricsProvider.NewShortWatchesMetric(name),
|
||||
watchDuration: metricsFactory.metricsProvider.NewWatchDurationMetric(name),
|
||||
numberOfItemsInWatch: metricsFactory.metricsProvider.NewItemsInWatchMetric(name),
|
||||
lastResourceVersion: metricsFactory.metricsProvider.NewLastResourceVersionMetric(name),
|
||||
}
|
||||
}
|
||||
|
||||
// SetReflectorMetricsProvider sets the metrics provider
|
||||
func SetReflectorMetricsProvider(metricsProvider MetricsProvider) {
|
||||
metricsFactory.setProviders.Do(func() {
|
||||
|
337
vendor/k8s.io/client-go/tools/cache/shared_informer.go
generated
vendored
@@ -21,48 +21,168 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/utils/buffer"
|
||||
|
||||
"k8s.io/klog"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// SharedInformer has a shared data cache and is capable of distributing notifications for changes
|
||||
// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is
|
||||
// one behavior change compared to a standard Informer. When you receive a notification, the cache
|
||||
// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend
|
||||
// on the contents of the cache exactly matching the notification you've received in handler
|
||||
// functions. If there was a create, followed by a delete, the cache may NOT have your item. This
|
||||
// has advantages over the broadcaster since it allows us to share a common cache across many
|
||||
// controllers. Extending the broadcaster would have required us keep duplicate caches for each
|
||||
// watch.
|
||||
// SharedInformer provides eventually consistent linkage of its
|
||||
// clients to the authoritative state of a given collection of
|
||||
// objects. An object is identified by its API group, kind/resource,
|
||||
// namespace (if any), and name; the `ObjectMeta.UID` is not part of
|
||||
// an object's ID as far as this contract is concerned. One
|
||||
// SharedInformer provides linkage to objects of a particular API
|
||||
// group and kind/resource. The linked object collection of a
|
||||
// SharedInformer may be further restricted to one namespace (if
|
||||
// applicable) and/or by label selector and/or field selector.
|
||||
//
|
||||
// The authoritative state of an object is what apiservers provide
|
||||
// access to, and an object goes through a strict sequence of states.
|
||||
// An object state is either (1) present with a ResourceVersion and
|
||||
// other appropriate content or (2) "absent".
|
||||
//
|
||||
// A SharedInformer maintains a local cache --- exposed by GetStore(),
|
||||
// by GetIndexer() in the case of an indexed informer, and possibly by
|
||||
// machinery involved in creating and/or accessing the informer --- of
|
||||
// the state of each relevant object. This cache is eventually
|
||||
// consistent with the authoritative state. This means that, unless
|
||||
// prevented by persistent communication problems, if ever a
|
||||
// particular object ID X is authoritatively associated with a state S
|
||||
// then for every SharedInformer I whose collection includes (X, S)
|
||||
// eventually either (1) I's cache associates X with S or a later
|
||||
// state of X, (2) I is stopped, or (3) the authoritative state
|
||||
// service for X terminates. To be formally complete, we say that the
|
||||
// absent state meets any restriction by label selector or field
|
||||
// selector.
|
||||
//
|
||||
// For a given informer and relevant object ID X, the sequence of
|
||||
// states that appears in the informer's cache is a subsequence of the
|
||||
// states authoritatively associated with X. That is, some states
|
||||
// might never appear in the cache but ordering among the appearing
|
||||
// states is correct. Note, however, that there is no promise about
|
||||
// ordering between states seen for different objects.
|
||||
//
|
||||
// The local cache starts out empty, and gets populated and updated
|
||||
// during `Run()`.
|
||||
//
|
||||
// As a simple example, if a collection of objects is henceforth
|
||||
// unchanging, a SharedInformer is created that links to that
|
||||
// collection, and that SharedInformer is `Run()` then that
|
||||
// SharedInformer's cache eventually holds an exact copy of that
|
||||
// collection (unless it is stopped too soon, the authoritative state
|
||||
// service ends, or communication problems between the two
|
||||
// persistently thwart achievement).
|
||||
//
|
||||
// As another simple example, if the local cache ever holds a
|
||||
// non-absent state for some object ID and the object is eventually
|
||||
// removed from the authoritative state then eventually the object is
|
||||
// removed from the local cache (unless the SharedInformer is stopped
|
||||
// too soon, the authoritative state service ends, or communication
|
||||
// problems persistently thwart the desired result).
|
||||
//
|
||||
// The keys in the Store are of the form namespace/name for namespaced
|
||||
// objects, and are simply the name for non-namespaced objects.
|
||||
// Clients can use `MetaNamespaceKeyFunc(obj)` to extract the key for
|
||||
// a given object, and `SplitMetaNamespaceKey(key)` to split a key
|
||||
// into its constituent parts.
|
||||
//
|
||||
// Every query against the local cache is answered entirely from one
|
||||
// snapshot of the cache's state. Thus, the result of a `List` call
|
||||
// will not contain two entries with the same namespace and name.
|
||||
//
|
||||
// A client is identified here by a ResourceEventHandler. For every
|
||||
// update to the SharedInformer's local cache and for every client
|
||||
// added before `Run()`, eventually either the SharedInformer is
|
||||
// stopped or the client is notified of the update. A client added
|
||||
// after `Run()` starts gets a startup batch of notifications of
|
||||
// additions of the objects existing in the cache at the time that
|
||||
// client was added; also, for every update to the SharedInformer's
|
||||
// local cache after that client was added, eventually either the
|
||||
// SharedInformer is stopped or that client is notified of that
|
||||
// update. Client notifications happen after the corresponding cache
|
||||
// update and, in the case of a SharedIndexInformer, after the
|
||||
// corresponding index updates. It is possible that additional cache
|
||||
// and index updates happen before such a prescribed notification.
|
||||
// For a given SharedInformer and client, the notifications are
|
||||
// delivered sequentially. For a given SharedInformer, client, and
|
||||
// object ID, the notifications are delivered in order. Because
|
||||
// `ObjectMeta.UID` has no role in identifying objects, it is possible
|
||||
// that when (1) object O1 with ID (e.g. namespace and name) X and
|
||||
// `ObjectMeta.UID` U1 in the SharedInformer's local cache is deleted
|
||||
// and later (2) another object O2 with ID X and ObjectMeta.UID U2 is
|
||||
// created the informer's clients are not notified of (1) and (2) but
|
||||
// rather are notified only of an update from O1 to O2. Clients that
|
||||
// need to detect such cases might do so by comparing the `ObjectMeta.UID`
|
||||
// field of the old and the new object in the code that handles update
|
||||
// notifications (i.e. `OnUpdate` method of ResourceEventHandler).
|
||||
//
|
||||
// A client must process each notification promptly; a SharedInformer
|
||||
// is not engineered to deal well with a large backlog of
|
||||
// notifications to deliver. Lengthy processing should be passed off
|
||||
// to something else, for example through a
|
||||
// `client-go/util/workqueue`.
|
||||
//
|
||||
// A delete notification exposes the last locally known non-absent
|
||||
// state, except that its ResourceVersion is replaced with a
|
||||
// ResourceVersion in which the object is actually absent.
|
||||
type SharedInformer interface {
|
||||
// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
|
||||
// period. Events to a single handler are delivered sequentially, but there is no coordination
|
||||
// between different handlers.
|
||||
AddEventHandler(handler ResourceEventHandler)
|
||||
// AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the
|
||||
// specified resync period. Events to a single handler are delivered sequentially, but there is
|
||||
// no coordination between different handlers.
|
||||
// AddEventHandlerWithResyncPeriod adds an event handler to the
|
||||
// shared informer with the requested resync period; zero means
|
||||
// this handler does not care about resyncs. The resync operation
|
||||
// consists of delivering to the handler an update notification
|
||||
// for every object in the informer's local cache; it does not add
|
||||
// any interactions with the authoritative storage. Some
|
||||
// informers do no resyncs at all, not even for handlers added
|
||||
// with a non-zero resyncPeriod. For an informer that does
|
||||
// resyncs, and for each handler that requests resyncs, that
|
||||
// informer develops a nominal resync period that is no shorter
|
||||
// than the requested period but may be longer. The actual time
|
||||
// between any two resyncs may be longer than the nominal period
|
||||
// because the implementation takes time to do work and there may
|
||||
// be competing load and scheduling noise.
|
||||
AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
|
||||
// GetStore returns the Store.
|
||||
// GetStore returns the informer's local cache as a Store.
|
||||
GetStore() Store
|
||||
// GetController gives back a synthetic interface that "votes" to start the informer
|
||||
// GetController is deprecated, it does nothing useful
|
||||
GetController() Controller
|
||||
// Run starts the shared informer, which will be stopped when stopCh is closed.
|
||||
// Run starts and runs the shared informer, returning after it stops.
|
||||
// The informer will be stopped when stopCh is closed.
|
||||
Run(stopCh <-chan struct{})
|
||||
// HasSynced returns true if the shared informer's store has synced.
|
||||
// HasSynced returns true if the shared informer's store has been
|
||||
// informed by at least one full LIST of the authoritative state
|
||||
// of the informer's object collection. This is unrelated to "resync".
|
||||
HasSynced() bool
|
||||
// LastSyncResourceVersion is the resource version observed when last synced with the underlying
|
||||
// store. The value returned is not synchronized with access to the underlying store and is not
|
||||
// thread-safe.
|
||||
LastSyncResourceVersion() string
|
||||
|
||||
// The WatchErrorHandler is called whenever ListAndWatch drops the
|
||||
// connection with an error. After calling this handler, the informer
|
||||
// will backoff and retry.
|
||||
//
|
||||
// The default implementation looks at the error type and tries to log
|
||||
// the error message at an appropriate level.
|
||||
//
|
||||
// There's only one handler, so if you call this multiple times, last one
|
||||
// wins; calling after the informer has been started returns an error.
|
||||
//
|
||||
// The handler is intended for visibility, not to e.g. pause the consumers.
|
||||
// The handler should return quickly - any expensive processing should be
|
||||
// offloaded.
|
||||
SetWatchErrorHandler(handler WatchErrorHandler) error
|
||||
}
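The key format described in the doc comment above has two helper functions in this package; a short sketch (the Pod is illustrative only):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web"}}

	// Store keys are "namespace/name" for namespaced objects.
	key, err := cache.MetaNamespaceKeyFunc(pod)
	if err != nil {
		panic(err)
	}
	fmt.Println(key) // "default/web"

	ns, name, err := cache.SplitMetaNamespaceKey(key)
	fmt.Println(ns, name, err) // "default web <nil>"
}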
|
||||
|
||||
// SharedIndexInformer provides add and get Indexers ability based on SharedInformer.
|
||||
type SharedIndexInformer interface {
|
||||
SharedInformer
|
||||
// AddIndexers add indexers to the informer before it starts.
|
||||
@@ -71,21 +191,32 @@ type SharedIndexInformer interface {
|
||||
}
|
||||
|
||||
// NewSharedInformer creates a new instance for the listwatcher.
|
||||
func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
|
||||
return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{})
|
||||
func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer {
|
||||
return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{})
|
||||
}
|
||||
|
||||
// NewSharedIndexInformer creates a new instance for the listwatcher.
|
||||
func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
|
||||
// The created informer will not do resyncs if the given
|
||||
// defaultEventHandlerResyncPeriod is zero. Otherwise: for each
|
||||
// handler that with a non-zero requested resync period, whether added
|
||||
// before or after the informer starts, the nominal resync period is
|
||||
// the requested resync period rounded up to a multiple of the
|
||||
// informer's resync checking period. Such an informer's resync
|
||||
// checking period is established when the informer starts running,
|
||||
// and is the maximum of (a) the minimum of the resync periods
|
||||
// requested before the informer starts and the
|
||||
// defaultEventHandlerResyncPeriod given here and (b) the constant
|
||||
// `minimumResyncPeriod` defined in this file.
|
||||
func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
|
||||
realClock := &clock.RealClock{}
|
||||
sharedIndexInformer := &sharedIndexInformer{
|
||||
processor: &sharedProcessor{clock: realClock},
|
||||
indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
|
||||
listerWatcher: lw,
|
||||
objectType: objType,
|
||||
objectType: exampleObject,
|
||||
resyncCheckPeriod: defaultEventHandlerResyncPeriod,
|
||||
defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
|
||||
cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", objType)),
|
||||
cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)),
|
||||
clock: realClock,
|
||||
}
|
||||
return sharedIndexInformer
|
||||
@@ -102,10 +233,26 @@ const (
|
||||
initialBufferSize = 1024
|
||||
)
|
||||
|
||||
// WaitForNamedCacheSync is a wrapper around WaitForCacheSync that generates log messages
|
||||
// indicating that the caller identified by name is waiting for syncs, followed by
|
||||
// either a successful or failed sync.
|
||||
func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
|
||||
klog.Infof("Waiting for caches to sync for %s", controllerName)
|
||||
|
||||
if !WaitForCacheSync(stopCh, cacheSyncs...) {
|
||||
utilruntime.HandleError(fmt.Errorf("unable to sync caches for %s", controllerName))
|
||||
return false
|
||||
}
|
||||
|
||||
klog.Infof("Caches are synced for %s ", controllerName)
|
||||
return true
|
||||
}
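Putting the pieces above together, a hedged end-to-end sketch of a shared index informer plus WaitForNamedCacheSync; the in-cluster config, controller name, and handlers are assumptions for illustration.

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	lw := cache.NewListWatchFromClient(cs.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	inf := cache.NewSharedIndexInformer(lw, &v1.Pod{}, 10*time.Minute, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})

	inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { fmt.Println("add", obj.(*v1.Pod).Name) },
		DeleteFunc: func(obj interface{}) { fmt.Println("delete") },
	})

	stop := make(chan struct{})
	defer close(stop)
	go inf.Run(stop)

	// Block until the first full LIST has populated the local cache.
	if !cache.WaitForNamedCacheSync("example-controller", stop, inf.HasSynced) {
		return
	}
	<-stop
}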
|
||||
|
||||
// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false
|
||||
// if the controller should shutdown
|
||||
// callers should prefer WaitForNamedCacheSync()
|
||||
func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
|
||||
err := wait.PollUntil(syncedPollPeriod,
|
||||
err := wait.PollImmediateUntil(syncedPollPeriod,
|
||||
func() (bool, error) {
|
||||
for _, syncFunc := range cacheSyncs {
|
||||
if !syncFunc() {
|
||||
@@ -124,16 +271,33 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool
|
||||
return true
|
||||
}
|
||||
|
// `*sharedIndexInformer` implements SharedIndexInformer and has three
// main components. One is an indexed local cache, `indexer Indexer`.
// The second main component is a Controller that pulls
// objects/notifications using the ListerWatcher and pushes them into
// a DeltaFIFO --- whose knownObjects is the informer's local cache
// --- while concurrently Popping Deltas values from that fifo and
// processing them with `sharedIndexInformer::HandleDeltas`. Each
// invocation of HandleDeltas, which is done with the fifo's lock
// held, processes each Delta in turn. For each Delta this both
// updates the local cache and stuffs the relevant notification into
// the sharedProcessor. The third main component is that
// sharedProcessor, which is responsible for relaying those
// notifications to each of the informer's clients.
type sharedIndexInformer struct {
    indexer    Indexer
    controller Controller

    processor             *sharedProcessor
    cacheMutationDetector CacheMutationDetector
    cacheMutationDetector MutationDetector

    // This block is tracked to handle late initialization of the controller
    listerWatcher ListerWatcher
    objectType    runtime.Object

    // objectType is an example object of the type this informer is
    // expected to handle. Only the type needs to be right, except
    // that when that is `unstructured.Unstructured` the object's
    // `"apiVersion"` and `"kind"` must also be right.
    objectType runtime.Object

    // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call
    // shouldResync to check if any of our listeners need a resync.
@@ -151,6 +315,9 @@ type sharedIndexInformer struct {
    // blockDeltas gives a way to stop all event distribution so that a late event handler
    // can safely join the shared informer.
    blockDeltas sync.Mutex

    // Called whenever the ListAndWatch drops the connection with an error.
    watchErrorHandler WatchErrorHandler
}

// dummyController hides the fact that a SharedInformer is different from a dedicated one
@@ -169,7 +336,7 @@ func (v *dummyController) HasSynced() bool {
    return v.informer.HasSynced()
}

func (c *dummyController) LastSyncResourceVersion() string {
func (v *dummyController) LastSyncResourceVersion() string {
    return ""
}

@@ -186,10 +353,25 @@ type deleteNotification struct {
    oldObj interface{}
}
func (s *sharedIndexInformer) SetWatchErrorHandler(handler WatchErrorHandler) error {
    s.startedLock.Lock()
    defer s.startedLock.Unlock()

    if s.started {
        return fmt.Errorf("informer has already started")
    }

    s.watchErrorHandler = handler
    return nil
}
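
A hedged sketch of how a caller might use the new SetWatchErrorHandler before starting the informer; the logging body and the function name are illustrative, not from this repository.

    // Sketch: the handler must be registered before Run, otherwise the call
    // returns the "informer has already started" error shown above.
    import (
        "k8s.io/client-go/tools/cache"
        "k8s.io/klog/v2"
    )

    func startInformer(informer cache.SharedIndexInformer, stopCh <-chan struct{}) error {
        if err := informer.SetWatchErrorHandler(func(r *cache.Reflector, err error) {
            klog.Errorf("watch dropped (last seen resourceVersion %q): %v", r.LastSyncResourceVersion(), err)
        }); err != nil {
            return err
        }
        go informer.Run(stopCh)
        return nil
    }
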
func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()

    fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, s.indexer)
    fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
        KnownObjects:          s.indexer,
        EmitDeltaTypeReplaced: true,
    })

    cfg := &Config{
        Queue: fifo,
@@ -199,7 +381,8 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
        RetryOnError: false,
        ShouldResync: s.processor.shouldResync,

        Process: s.HandleDeltas,
        Process:           s.HandleDeltas,
        WatchErrorHandler: s.watchErrorHandler,
    }

    func() {
@@ -302,13 +485,13 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv

    if resyncPeriod > 0 {
        if resyncPeriod < minimumResyncPeriod {
            klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
            klog.Warningf("resyncPeriod %v is too small. Changing it to the minimum allowed value of %v", resyncPeriod, minimumResyncPeriod)
            resyncPeriod = minimumResyncPeriod
        }

        if resyncPeriod < s.resyncCheckPeriod {
            if s.started {
                klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
                klog.Warningf("resyncPeriod %v is smaller than resyncCheckPeriod %v and the informer has already started. Changing it to %v", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
                resyncPeriod = s.resyncCheckPeriod
            } else {
                // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
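
A short, hedged sketch of the caller side of these warnings, assuming the informer built in the earlier sketch; the five-minute period is arbitrary.

    // Sketch: a handler with its own resync period. Anything below
    // minimumResyncPeriod (or below resyncCheckPeriod once the informer has
    // started) is raised and logged by the code above.
    import (
        "time"

        "k8s.io/client-go/tools/cache"
    )

    func addHandlerSketch(informer cache.SharedIndexInformer) {
        informer.AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
            AddFunc:    func(obj interface{}) { /* enqueue obj */ },
            UpdateFunc: func(oldObj, newObj interface{}) { /* enqueue newObj */ },
            DeleteFunc: func(obj interface{}) { /* enqueue obj or its tombstone */ },
        }, 5*time.Minute)
    }
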
@@ -348,19 +531,33 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
    // from oldest to newest
    for _, d := range obj.(Deltas) {
        switch d.Type {
        case Sync, Added, Updated:
            isSync := d.Type == Sync
        case Sync, Replaced, Added, Updated:
            s.cacheMutationDetector.AddObject(d.Object)
            if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
                if err := s.indexer.Update(d.Object); err != nil {
                    return err
                }

                isSync := false
                switch {
                case d.Type == Sync:
                    // Sync events are only propagated to listeners that requested resync
                    isSync = true
                case d.Type == Replaced:
                    if accessor, err := meta.Accessor(d.Object); err == nil {
                        if oldAccessor, err := meta.Accessor(old); err == nil {
                            // Replaced events that didn't change resourceVersion are treated as resync events
                            // and only propagated to listeners that requested resync
                            isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion()
                        }
                    }
                }
                s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync)
            } else {
                if err := s.indexer.Add(d.Object); err != nil {
                    return err
                }
                s.processor.distribute(addNotification{newObj: d.Object}, isSync)
                s.processor.distribute(addNotification{newObj: d.Object}, false)
            }
        case Deleted:
            if err := s.indexer.Delete(d.Object); err != nil {
@@ -372,6 +569,12 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
    return nil
}
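
Seen from an event handler, the Sync deltas and the unchanged-resourceVersion Replaced deltas above arrive as ordinary update notifications. A hedged sketch of a handler that filters them out (the filtering policy is an assumption, not something this diff prescribes):

    // Sketch: skip updates whose resourceVersion did not change, i.e. the
    // resync/Replaced notifications distributed with isSync above.
    import (
        "k8s.io/apimachinery/pkg/api/meta"
        "k8s.io/client-go/tools/cache"
    )

    func resyncAwareHandlerSketch(informer cache.SharedIndexInformer) {
        informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
            UpdateFunc: func(oldObj, newObj interface{}) {
                oldMeta, errOld := meta.Accessor(oldObj)
                newMeta, errNew := meta.Accessor(newObj)
                if errOld == nil && errNew == nil &&
                    oldMeta.GetResourceVersion() == newMeta.GetResourceVersion() {
                    return // periodic resync or Replaced with unchanged resourceVersion
                }
                // handle a real update...
            },
        })
    }
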
// sharedProcessor has a collection of processorListener and can
// distribute a notification object to its listeners. There are two
// kinds of distribute operations. The sync distributions go to a
// subset of the listeners that (a) is recomputed in the occasional
// calls to shouldResync and (b) every listener is initially put in.
// The non-sync distributions go to every listener.
type sharedProcessor struct {
    listenersStarted bool
    listenersLock    sync.RWMutex
@@ -463,6 +666,17 @@ func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Durati
    }
}

// processorListener relays notifications from a sharedProcessor to
// one ResourceEventHandler --- using two goroutines, two unbuffered
// channels, and an unbounded ring buffer. The `add(notification)`
// function sends the given notification to `addCh`. One goroutine
// runs `pop()`, which pumps notifications from `addCh` to `nextCh`
// using storage in the ring buffer while `nextCh` is not keeping up.
// Another goroutine runs `run()`, which receives notifications from
// `nextCh` and synchronously invokes the appropriate handler method.
//
// processorListener also keeps track of the adjusted requested resync
// period of the listener.
type processorListener struct {
    nextCh chan interface{}
    addCh  chan interface{}
@@ -476,11 +690,22 @@ type processorListener struct {
    // we should try to do something better.
    pendingNotifications buffer.RingGrowing

    // requestedResyncPeriod is how frequently the listener wants a full resync from the shared informer
    // requestedResyncPeriod is how frequently the listener wants a
    // full resync from the shared informer, but modified by two
    // adjustments. One is imposing a lower bound,
    // `minimumResyncPeriod`. The other is another lower bound, the
    // sharedProcessor's `resyncCheckPeriod`, that is imposed (a) only
    // in AddEventHandlerWithResyncPeriod invocations made after the
    // sharedProcessor starts and (b) only if the informer does
    // resyncs at all.
    requestedResyncPeriod time.Duration
    // resyncPeriod is how frequently the listener wants a full resync from the shared informer. This
    // value may differ from requestedResyncPeriod if the shared informer adjusts it to align with the
    // informer's overall resync check period.
    // resyncPeriod is the threshold that will be used in the logic
    // for this listener. This value differs from
    // requestedResyncPeriod only when the sharedIndexInformer does
    // not do resyncs, in which case the value here is zero. The
    // actual time between resyncs depends on when the
    // sharedProcessor's `shouldResync` function is invoked and when
    // the sharedIndexInformer processes `Sync` type Delta objects.
    resyncPeriod time.Duration
    // nextResync is the earliest time the listener should get a full resync
    nextResync time.Time
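
The pop()/run() arrangement described in the comment above is essentially a pump between two unbuffered channels with an unbounded buffer in the middle. Below is a generic, self-contained sketch of that pattern; it is not the vendored implementation: container/list stands in for buffer.RingGrowing, and it flushes the buffer on close so the demo output is deterministic.

    package main

    import (
        "container/list"
        "fmt"
    )

    // pump forwards values from in to out, parking them in an unbounded buffer
    // whenever the consumer on out is not keeping up.
    func pump(in <-chan interface{}, out chan<- interface{}) {
        defer close(out)
        pending := list.New()       // unbounded FIFO buffer
        var next interface{}        // oldest value waiting to be sent
        var send chan<- interface{} // nil until there is something to send
        for {
            select {
            case send <- next:
                if front := pending.Front(); front != nil {
                    next = pending.Remove(front)
                } else {
                    send = nil // buffer drained; disable this case
                }
            case v, ok := <-in:
                if !ok {
                    // input closed: flush what is still buffered, then exit
                    for send != nil {
                        send <- next
                        if front := pending.Front(); front != nil {
                            next = pending.Remove(front)
                        } else {
                            send = nil
                        }
                    }
                    return
                }
                if send == nil {
                    next, send = v, out
                } else {
                    pending.PushBack(v)
                }
            }
        }
    }

    func main() {
        in, out := make(chan interface{}), make(chan interface{})
        go pump(in, out)
        for i := 0; i < 3; i++ {
            in <- i // the producer is never blocked by a slow consumer for long
        }
        close(in)
        for v := range out {
            fmt.Println(v) // 0, 1, 2 in order
        }
    }
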
@@ -544,29 +769,21 @@ func (p *processorListener) run() {
    // delivering again.
    stopCh := make(chan struct{})
    wait.Until(func() {
        // this gives us a few quick retries before a long pause and then a few more quick retries
        err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
            for next := range p.nextCh {
                switch notification := next.(type) {
                case updateNotification:
                    p.handler.OnUpdate(notification.oldObj, notification.newObj)
                case addNotification:
                    p.handler.OnAdd(notification.newObj)
                case deleteNotification:
                    p.handler.OnDelete(notification.oldObj)
                default:
                    utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
                }
        for next := range p.nextCh {
            switch notification := next.(type) {
            case updateNotification:
                p.handler.OnUpdate(notification.oldObj, notification.newObj)
            case addNotification:
                p.handler.OnAdd(notification.newObj)
            case deleteNotification:
                p.handler.OnDelete(notification.oldObj)
            default:
                utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next))
            }
            // the only way to get here is if the p.nextCh is empty and closed
            return true, nil
        })

        // the only way to get here is if the p.nextCh is empty and closed
        if err == nil {
            close(stopCh)
        }
    }, 1*time.Minute, stopCh)
        // the only way to get here is if the p.nextCh is empty and closed
        close(stopCh)
    }, 1*time.Second, stopCh)
}

// shouldResync determines if the listener needs a resync. If the listener's resyncPeriod is 0,
46 vendor/k8s.io/client-go/tools/cache/store.go generated vendored
@@ -23,27 +23,50 @@ import (
    "k8s.io/apimachinery/pkg/api/meta"
)

// Store is a generic object storage interface. Reflector knows how to watch a server
// and update a store. A generic store is provided, which allows Reflector to be used
// as a local caching system, and an LRU store, which allows Reflector to work like a
// queue of items yet to be processed.
// Store is a generic object storage and processing interface. A
// Store holds a map from string keys to accumulators, and has
// operations to add, update, and delete a given object to/from the
// accumulator currently associated with a given key. A Store also
// knows how to extract the key from a given object, so many operations
// are given only the object.
//
// Store makes no assumptions about stored object identity; it is the responsibility
// of a Store implementation to provide a mechanism to correctly key objects and to
// define the contract for obtaining objects by some arbitrary key type.
// In the simplest Store implementations each accumulator is simply
// the last given object, or empty after Delete, and thus the Store's
// behavior is simple storage.
//
// Reflector knows how to watch a server and update a Store. This
// package provides a variety of implementations of Store.
type Store interface {

    // Add adds the given object to the accumulator associated with the given object's key
    Add(obj interface{}) error

    // Update updates the given object in the accumulator associated with the given object's key
    Update(obj interface{}) error

    // Delete deletes the given object from the accumulator associated with the given object's key
    Delete(obj interface{}) error

    // List returns a list of all the currently non-empty accumulators
    List() []interface{}

    // ListKeys returns a list of all the keys currently associated with non-empty accumulators
    ListKeys() []string

    // Get returns the accumulator associated with the given object's key
    Get(obj interface{}) (item interface{}, exists bool, err error)

    // GetByKey returns the accumulator associated with the given key
    GetByKey(key string) (item interface{}, exists bool, err error)

    // Replace will delete the contents of the store, using instead the
    // given list. Store takes ownership of the list, you should not reference
    // it after calling this function.
    Replace([]interface{}, string) error

    // Resync is meaningless in the terms appearing here but has
    // meaning in some implementations that have non-trivial
    // additional behavior (e.g., DeltaFIFO).
    Resync() error
}
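
A brief, hedged sketch of the accumulator wording above using the plain map-backed Store from this file; the Pod object and its key are illustrative.

    // Sketch: in the simplest implementation each accumulator is just the last
    // object stored under its key ("namespace/name" with MetaNamespaceKeyFunc).
    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/tools/cache"
    )

    func storeSketch() {
        store := cache.NewStore(cache.MetaNamespaceKeyFunc)

        pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web"}}
        _ = store.Add(pod)

        if obj, exists, err := store.GetByKey("default/web"); err == nil && exists {
            fmt.Printf("cached: %T\n", obj) // *v1.Pod
        }
    }
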
@@ -106,9 +129,8 @@ func SplitMetaNamespaceKey(key string) (namespace, name string, err error) {
    return "", "", fmt.Errorf("unexpected key format: %q", key)
}

// cache responsibilities are limited to:
// 1. Computing keys for objects via keyFunc
// 2. Invoking methods of a ThreadSafeStorage interface
// `*cache` implements Indexer in terms of a ThreadSafeStore and an
// associated KeyFunc.
type cache struct {
    // cacheStorage bears the burden of thread safety for the cache
    cacheStorage ThreadSafeStore
@@ -222,9 +244,9 @@ func (c *cache) Replace(list []interface{}, resourceVersion string) error {
    return nil
}

// Resync touches all items in the store to force processing
// Resync is meaningless for one of these
func (c *cache) Resync() error {
    return c.cacheStorage.Resync()
    return nil
}

// NewStore returns a Store implemented simply with a map and a lock.
55 vendor/k8s.io/client-go/tools/cache/thread_safe_store.go generated vendored
@@ -23,7 +23,11 @@ import (
    "k8s.io/apimachinery/pkg/util/sets"
)

// ThreadSafeStore is an interface that allows concurrent access to a storage backend.
// ThreadSafeStore is an interface that allows concurrent indexed
// access to a storage backend. It is like Indexer but does not
// (necessarily) know how to extract the Store key from a given
// object.
//
// TL;DR caveats: you must not modify anything returned by Get or List as it will break
// the indexing feature in addition to not being thread safe.
//
@@ -51,6 +55,7 @@ type ThreadSafeStore interface {
    // AddIndexers adds more indexers to this store. If you call this after you already have data
    // in the store, the results are undefined.
    AddIndexers(newIndexers Indexers) error
    // Resync is a no-op and is deprecated
    Resync() error
}

@@ -131,8 +136,8 @@ func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion st
    }
}

// Index returns a list of items that match on the index function
// Index is thread-safe so long as you treat all items as immutable
// Index returns a list of items that match the given object on the index function.
// Index is thread-safe so long as you treat all items as immutable.
func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) {
    c.lock.RLock()
    defer c.lock.RUnlock()
@@ -142,37 +147,37 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{},
        return nil, fmt.Errorf("Index with name %s does not exist", indexName)
    }

    indexKeys, err := indexFunc(obj)
    indexedValues, err := indexFunc(obj)
    if err != nil {
        return nil, err
    }
    index := c.indices[indexName]

    var returnKeySet sets.String
    if len(indexKeys) == 1 {
    var storeKeySet sets.String
    if len(indexedValues) == 1 {
        // In majority of cases, there is exactly one value matching.
        // Optimize the most common path - deduping is not needed here.
        returnKeySet = index[indexKeys[0]]
        storeKeySet = index[indexedValues[0]]
    } else {
        // Need to de-dupe the return list.
        // Since multiple keys are allowed, this can happen.
        returnKeySet = sets.String{}
        for _, indexKey := range indexKeys {
            for key := range index[indexKey] {
                returnKeySet.Insert(key)
        storeKeySet = sets.String{}
        for _, indexedValue := range indexedValues {
            for key := range index[indexedValue] {
                storeKeySet.Insert(key)
            }
        }
    }

    list := make([]interface{}, 0, returnKeySet.Len())
    for absoluteKey := range returnKeySet {
        list = append(list, c.items[absoluteKey])
    list := make([]interface{}, 0, storeKeySet.Len())
    for storeKey := range storeKeySet {
        list = append(list, c.items[storeKey])
    }
    return list, nil
}

// ByIndex returns a list of items that match an exact value on the index function
func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, error) {
// ByIndex returns a list of the items whose indexed values in the given index include the given indexed value
func (c *threadSafeMap) ByIndex(indexName, indexedValue string) ([]interface{}, error) {
    c.lock.RLock()
    defer c.lock.RUnlock()

@@ -183,18 +188,18 @@ func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, erro

    index := c.indices[indexName]

    set := index[indexKey]
    set := index[indexedValue]
    list := make([]interface{}, 0, set.Len())
    for _, key := range set.List() {
    for key := range set {
        list = append(list, c.items[key])
    }

    return list, nil
}
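
The indexKey-to-indexedValue renames above are easier to read through the public Indexer API, where the index name and the indexed value are separate arguments. A hedged sketch with illustrative objects follows.

    // Sketch: "namespace" is the indexName, "default" is the indexedValue.
    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/tools/cache"
    )

    func byNamespaceSketch() {
        indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
            cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, // indexFunc: object -> []{namespace}
        })
        _ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web"}})
        _ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "dns"}})

        objs, _ := indexer.ByIndex(cache.NamespaceIndex, "default")
        fmt.Println(len(objs)) // 1
    }
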
// IndexKeys returns a list of keys that match on the index function.
// IndexKeys returns a list of the Store keys of the objects whose indexed values in the given index include the given indexed value.
// IndexKeys is thread-safe so long as you treat all items as immutable.
func (c *threadSafeMap) IndexKeys(indexName, indexKey string) ([]string, error) {
func (c *threadSafeMap) IndexKeys(indexName, indexedValue string) ([]string, error) {
    c.lock.RLock()
    defer c.lock.RUnlock()

@@ -205,7 +210,7 @@ func (c *threadSafeMap) IndexKeys(indexName, indexKey string) ([]string, error)

    index := c.indices[indexName]

    set := index[indexKey]
    set := index[indexedValue]
    return set.List(), nil
}

@@ -292,6 +297,13 @@ func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) {
            set := index[indexValue]
            if set != nil {
                set.Delete(key)

                // If we don't delete the set when zero, indices with high cardinality
                // short lived resources can cause memory to increase over time from
                // unused empty sets. See `kubernetes/kubernetes/issues/84959`.
                if len(set) == 0 {
                    delete(index, indexValue)
                }
            }
        }
    }
}
@@ -302,6 +314,7 @@ func (c *threadSafeMap) Resync() error {
    return nil
}

// NewThreadSafeStore creates a new instance of ThreadSafeStore.
func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore {
    return &threadSafeMap{
        items: map[string]interface{}{},
8 vendor/k8s.io/client-go/tools/cache/undelta_store.go generated vendored
@@ -31,6 +31,7 @@ type UndeltaStore struct {
// Assert that it implements the Store interface.
var _ Store = &UndeltaStore{}

// Add inserts an object into the store and sends complete state by calling PushFunc.
// Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods.
// In the functions below, the lock gets released and reacquired between the {Add,Delete,etc}
// and the List. So, the following can happen, resulting in two identical calls to PushFunc.
@@ -41,7 +42,6 @@ var _ Store = &UndeltaStore{}
// 3 Store.Add(b)
// 4 Store.List() -> [a,b]
// 5 Store.List() -> [a,b]

func (u *UndeltaStore) Add(obj interface{}) error {
    if err := u.Store.Add(obj); err != nil {
        return err
@@ -50,6 +50,7 @@ func (u *UndeltaStore) Add(obj interface{}) error {
    return nil
}

// Update sets an item in the cache to its updated state and sends complete state by calling PushFunc.
func (u *UndeltaStore) Update(obj interface{}) error {
    if err := u.Store.Update(obj); err != nil {
        return err
@@ -58,6 +59,7 @@ func (u *UndeltaStore) Update(obj interface{}) error {
    return nil
}

// Delete removes an item from the cache and sends complete state by calling PushFunc.
func (u *UndeltaStore) Delete(obj interface{}) error {
    if err := u.Store.Delete(obj); err != nil {
        return err
@@ -66,6 +68,10 @@ func (u *UndeltaStore) Delete(obj interface{}) error {
    return nil
}

// Replace will delete the contents of current store, using instead the given list.
// 'u' takes ownership of the list, you should not reference the list again
// after calling this function.
// The new contents complete state will be sent by calling PushFunc after replacement.
func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error {
    if err := u.Store.Replace(list, resourceVersion); err != nil {
        return err
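
A hedged sketch of the push-complete-state behaviour these comments describe; the print-based PushFunc and the Pod object are purely illustrative.

    // Sketch: every mutation pushes the full current contents to PushFunc.
    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/tools/cache"
    )

    func undeltaSketch() {
        push := func(current []interface{}) {
            fmt.Printf("complete state: %d objects\n", len(current))
        }
        store := cache.NewUndeltaStore(push, cache.MetaNamespaceKeyFunc)

        pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web"}}
        _ = store.Add(pod)    // push sees 1 object
        _ = store.Delete(pod) // push sees 0 objects
    }
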