go.*: Update k8s packages
- update k8s client_go
- update k8s apiextensions-apiserver
- update k8s controller-tools

Signed-off-by: leonnicolas <leonloechner@gmx.de>
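For context, a sketch of how a bump like this is typically performed with the Go toolchain. The module versions are not shown in this section, so the `@<version>` arguments are placeholders, and the exact commands the author ran are an assumption:

    # bump the Kubernetes modules in go.mod/go.sum (versions are placeholders)
    go get k8s.io/client-go@<version>
    go get k8s.io/apiextensions-apiserver@<version>
    go get sigs.k8s.io/controller-tools@<version>
    # tidy and re-vendor, which rewrites the files under vendor/
    go mod tidy
    go mod vendor

The changes below to vendor/sigs.k8s.io/structured-merge-diff/v4 presumably come in transitively through these updates.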
vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go (184 changed lines, generated, vendored)
@@ -17,8 +17,6 @@ limitations under the License.
 package typed
 
 import (
-	"math"
-
 	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
 	"sigs.k8s.io/structured-merge-diff/v4/schema"
 	"sigs.k8s.io/structured-merge-diff/v4/value"
@@ -170,78 +168,94 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
 	if lhs != nil {
 		lLen = lhs.Length()
 	}
-	out := make([]interface{}, 0, int(math.Max(float64(rLen), float64(lLen))))
+	outLen := lLen
+	if outLen < rLen {
+		outLen = rLen
+	}
+	out := make([]interface{}, 0, outLen)
 
-	// TODO: ordering is totally wrong.
-	// TODO: might as well make the map order work the same way.
+	rhsOrder, observedRHS, rhsErrs := w.indexListPathElements(t, rhs)
+	errs = append(errs, rhsErrs...)
+	lhsOrder, observedLHS, lhsErrs := w.indexListPathElements(t, lhs)
+	errs = append(errs, lhsErrs...)
 
-	// This is a cheap hack to at least make the output order stable.
-	rhsOrder := make([]fieldpath.PathElement, 0, rLen)
-
-	// First, collect all RHS children.
-	observedRHS := fieldpath.MakePathElementValueMap(rLen)
-	if rhs != nil {
-		for i := 0; i < rhs.Length(); i++ {
-			child := rhs.At(i)
-			pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child)
-			if err != nil {
-				errs = append(errs, errorf("rhs: element %v: %v", i, err.Error())...)
-				// If we can't construct the path element, we can't
-				// even report errors deeper in the schema, so bail on
-				// this element.
-				continue
-			}
-			if _, ok := observedRHS.Get(pe); ok {
-				errs = append(errs, errorf("rhs: duplicate entries for key %v", pe.String())...)
-			}
-			observedRHS.Insert(pe, child)
-			rhsOrder = append(rhsOrder, pe)
+	sharedOrder := make([]*fieldpath.PathElement, 0, rLen)
+	for i := range rhsOrder {
+		pe := &rhsOrder[i]
+		if _, ok := observedLHS.Get(*pe); ok {
+			sharedOrder = append(sharedOrder, pe)
 		}
 	}
 
-	// Then merge with LHS children.
-	observedLHS := fieldpath.MakePathElementSet(lLen)
-	if lhs != nil {
-		for i := 0; i < lhs.Length(); i++ {
-			child := lhs.At(i)
-			pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child)
-			if err != nil {
-				errs = append(errs, errorf("lhs: element %v: %v", i, err.Error())...)
-				// If we can't construct the path element, we can't
-				// even report errors deeper in the schema, so bail on
-				// this element.
-				continue
-			}
-			if observedLHS.Has(pe) {
-				errs = append(errs, errorf("lhs: duplicate entries for key %v", pe.String())...)
-				continue
-			}
-			observedLHS.Insert(pe)
-			w2 := w.prepareDescent(pe, t.ElementType)
-			w2.lhs = value.Value(child)
-			if rchild, ok := observedRHS.Get(pe); ok {
-				w2.rhs = rchild
-			}
-			errs = append(errs, w2.merge(pe.String)...)
-			if w2.out != nil {
-				out = append(out, *w2.out)
-			}
-			w.finishDescent(w2)
-		}
+	var nextShared *fieldpath.PathElement
+	if len(sharedOrder) > 0 {
+		nextShared = sharedOrder[0]
+		sharedOrder = sharedOrder[1:]
 	}
 
-	for _, pe := range rhsOrder {
-		if observedLHS.Has(pe) {
-			continue
+	lLen, rLen = len(lhsOrder), len(rhsOrder)
+	for lI, rI := 0, 0; lI < lLen || rI < rLen; {
+		if lI < lLen && rI < rLen {
+			pe := lhsOrder[lI]
+			if pe.Equals(rhsOrder[rI]) {
+				// merge LHS & RHS items
+				lChild, _ := observedLHS.Get(pe)
+				rChild, _ := observedRHS.Get(pe)
+				mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild)
+				errs = append(errs, errs...)
+				if mergeOut != nil {
+					out = append(out, *mergeOut)
+				}
+				lI++
+				rI++
+
+				nextShared = nil
+				if len(sharedOrder) > 0 {
+					nextShared = sharedOrder[0]
+					sharedOrder = sharedOrder[1:]
+				}
+				continue
+			}
+			if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsOrder[lI]) {
+				// shared item, but not the one we want in this round
+				lI++
+				continue
+			}
 		}
-		value, _ := observedRHS.Get(pe)
-		w2 := w.prepareDescent(pe, t.ElementType)
-		w2.rhs = value
-		errs = append(errs, w2.merge(pe.String)...)
-		if w2.out != nil {
-			out = append(out, *w2.out)
+		if lI < lLen {
+			pe := lhsOrder[lI]
+			if _, ok := observedRHS.Get(pe); !ok {
+				// take LHS item
+				lChild, _ := observedLHS.Get(pe)
+				mergeOut, errs := w.mergeListItem(t, pe, lChild, nil)
+				errs = append(errs, errs...)
+				if mergeOut != nil {
+					out = append(out, *mergeOut)
+				}
+				lI++
+				continue
+			}
+		}
+		if rI < rLen {
+			// Take the RHS item, merge with matching LHS item if possible
+			pe := rhsOrder[rI]
+			lChild, _ := observedLHS.Get(pe) // may be nil
+			rChild, _ := observedRHS.Get(pe)
+			mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild)
+			errs = append(errs, errs...)
+			if mergeOut != nil {
+				out = append(out, *mergeOut)
+			}
+			rI++
+			// Advance nextShared, if we are merging nextShared.
+			if nextShared != nil && nextShared.Equals(pe) {
+				nextShared = nil
+				if len(sharedOrder) > 0 {
+					nextShared = sharedOrder[0]
+					sharedOrder = sharedOrder[1:]
+				}
+			}
 		}
-		w.finishDescent(w2)
 	}
 
 	if len(out) > 0 {
@@ -252,6 +266,46 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
 	return errs
 }
 
+func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) {
+	var errs ValidationErrors
+	length := 0
+	if list != nil {
+		length = list.Length()
+	}
+	observed := fieldpath.MakePathElementValueMap(length)
+	pes := make([]fieldpath.PathElement, 0, length)
+	for i := 0; i < length; i++ {
+		child := list.At(i)
+		pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child)
+		if err != nil {
+			errs = append(errs, errorf("element %v: %v", i, err.Error())...)
+			// If we can't construct the path element, we can't
+			// even report errors deeper in the schema, so bail on
+			// this element.
+			continue
+		}
+		if _, found := observed.Get(pe); found {
+			errs = append(errs, errorf("duplicate entries for key %v", pe.String())...)
+			continue
+		}
+		observed.Insert(pe, child)
+		pes = append(pes, pe)
+	}
+	return pes, observed, errs
+}
+
+func (w *mergingWalker) mergeListItem(t *schema.List, pe fieldpath.PathElement, lChild, rChild value.Value) (out *interface{}, errs ValidationErrors) {
+	w2 := w.prepareDescent(pe, t.ElementType)
+	w2.lhs = lChild
+	w2.rhs = rChild
+	errs = append(errs, w2.merge(pe.String)...)
+	if w2.out != nil {
+		out = w2.out
+	}
+	w.finishDescent(w2)
+	return
+}
+
 func (w *mergingWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) {
 	if v == nil {
 		return nil, nil
vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go (71 changed lines, generated, vendored)
@@ -124,13 +124,6 @@ func ReconcileFieldSetWithSchema(fieldset *fieldpath.Set, tv *TypedValue) (*fiel
 	v.schema = tv.schema
 	v.typeRef = tv.typeRef
 
-	// We don't reconcile deduced types, which are primarily for use by unstructured CRDs. Deduced
-	// types do not support atomic or granular tags. Nor does the dynamic schema deduction
-	// interact well with the reconcile logic.
-	if v.schema == DeducedParseableType.Schema {
-		return nil, nil
-	}
-
 	defer v.finished()
 	errs := v.reconcile()
 
@@ -187,19 +180,17 @@ func (v *reconcileWithSchemaWalker) visitListItems(t *schema.List, element *fiel
 }
 
 func (v *reconcileWithSchemaWalker) doList(t *schema.List) (errs ValidationErrors) {
-	// reconcile lists changed from granular to atomic
+	// reconcile lists changed from granular to atomic.
+	// Note that migrations from atomic to granular are not recommended and will
+	// be treated as if they were always granular.
+	//
+	// In this case, the manager that owned the previously atomic field (and all subfields),
+	// will now own just the top-level field and none of the subfields.
 	if !v.isAtomic && t.ElementRelationship == schema.Atomic {
 		v.toRemove = fieldpath.NewSet(v.path) // remove all root and all children fields
 		v.toAdd = fieldpath.NewSet(v.path) // add the root of the atomic
 		return errs
 	}
-	// reconcile lists changed from atomic to granular
-	if v.isAtomic && t.ElementRelationship == schema.Associative {
-		v.toAdd, errs = buildGranularFieldSet(v.path, v.value)
-		if errs != nil {
-			return errs
-		}
-	}
 	if v.fieldSet != nil {
 		errs = v.visitListItems(t, v.fieldSet)
 	}
@@ -231,7 +222,18 @@ func (v *reconcileWithSchemaWalker) visitMapItems(t *schema.Map, element *fieldp
 }
 
 func (v *reconcileWithSchemaWalker) doMap(t *schema.Map) (errs ValidationErrors) {
-	// reconcile maps and structs changed from granular to atomic
+	// We don't currently reconcile deduced types (unstructured CRDs) or maps that contain only unknown
+	// fields since deduced types do not yet support atomic or granular tags.
+	if isUntypedDeducedMap(t) {
+		return errs
+	}
+
+	// reconcile maps and structs changed from granular to atomic.
+	// Note that migrations from atomic to granular are not recommended and will
+	// be treated as if they were always granular.
+	//
+	// In this case the manager that owned the previously atomic field (and all subfields),
+	// will now own just the top-level field and none of the subfields.
 	if !v.isAtomic && t.ElementRelationship == schema.Atomic {
 		if v.fieldSet != nil && v.fieldSet.Size() > 0 {
 			v.toRemove = fieldpath.NewSet(v.path) // remove all root and all children fields
@@ -239,34 +241,12 @@ func (v *reconcileWithSchemaWalker) doMap(t *schema.Map) (errs ValidationErrors)
 		}
 		return errs
 	}
-	// reconcile maps changed from atomic to granular
-	if v.isAtomic && (t.ElementRelationship == schema.Separable || t.ElementRelationship == "") {
-		v.toAdd, errs = buildGranularFieldSet(v.path, v.value)
-		if errs != nil {
-			return errs
-		}
-	}
 	if v.fieldSet != nil {
 		errs = v.visitMapItems(t, v.fieldSet)
 	}
 	return errs
 }
 
-func buildGranularFieldSet(path fieldpath.Path, value *TypedValue) (*fieldpath.Set, ValidationErrors) {
-
-	valueFieldSet, err := value.ToFieldSet()
-	if err != nil {
-		return nil, errorf("toFieldSet: %v", err)
-	}
-	if valueFieldSetAtPath, ok := fieldSetAtPath(valueFieldSet, path); ok {
-		result := fieldpath.NewSet(path)
-		resultAtPath := descendToPath(result, path)
-		*resultAtPath = *valueFieldSetAtPath
-		return result, nil
-	}
-	return nil, nil
-}
-
 func fieldSetAtPath(node *fieldpath.Set, path fieldpath.Path) (*fieldpath.Set, bool) {
 	ok := true
 	for _, pe := range path {
@@ -293,3 +273,18 @@ func typeRefAtPath(t *schema.Map, pe fieldpath.PathElement) (schema.TypeRef, boo
 	}
 	return tr, tr != schema.TypeRef{}
 }
+
+// isUntypedDeducedMap returns true if m has no fields defined, but allows untyped elements.
+// This is equivalent to a openAPI object that has x-kubernetes-preserve-unknown-fields=true
+// but does not have any properties defined on the object.
+func isUntypedDeducedMap(m *schema.Map) bool {
+	return isUntypedDeducedRef(m.ElementType) && m.Fields == nil
+}
+
+func isUntypedDeducedRef(t schema.TypeRef) bool {
+	if t.NamedType != nil {
+		return *t.NamedType == "__untyped_deduced_"
+	}
+	atom := t.Inlined
+	return atom.Scalar != nil && *atom.Scalar == "untyped"
+}
vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go (37 changed lines, generated, vendored)
@@ -51,10 +51,22 @@ func (w *removingWalker) doScalar(t *schema.Scalar) ValidationErrors {
 }
 
 func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) {
+	if !w.value.IsList() {
+		return nil
+	}
 	l := w.value.AsListUsing(w.allocator)
 	defer w.allocator.Free(l)
-	// If list is null, empty, or atomic just return
-	if l == nil || l.Length() == 0 || t.ElementRelationship == schema.Atomic {
+	// If list is null or empty just return
+	if l == nil || l.Length() == 0 {
 		return nil
 	}
+
+	// atomic lists should return everything in the case of extract
+	// and nothing in the case of remove (!w.shouldExtract)
+	if t.ElementRelationship == schema.Atomic {
+		if w.shouldExtract {
+			w.out = w.value.Unstructured()
+		}
+		return nil
+	}
 
@@ -70,7 +82,7 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) {
 		// but ignore them when we are removing (i.e. !w.shouldExtract)
 		if w.toRemove.Has(path) {
 			if w.shouldExtract {
-				newItems = append(newItems, item.Unstructured())
+				newItems = append(newItems, removeItemsWithSchema(item, w.toRemove, w.schema, t.ElementType, w.shouldExtract).Unstructured())
 			} else {
 				continue
 			}
@@ -92,12 +104,24 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) {
 }
 
 func (w *removingWalker) doMap(t *schema.Map) ValidationErrors {
+	if !w.value.IsMap() {
+		return nil
+	}
 	m := w.value.AsMapUsing(w.allocator)
 	if m != nil {
 		defer w.allocator.Free(m)
 	}
-	// If map is null, empty, or atomic just return
-	if m == nil || m.Empty() || t.ElementRelationship == schema.Atomic {
+	// If map is null or empty just return
+	if m == nil || m.Empty() {
 		return nil
 	}
+
+	// atomic maps should return everything in the case of extract
+	// and nothing in the case of remove (!w.shouldExtract)
+	if t.ElementRelationship == schema.Atomic {
+		if w.shouldExtract {
+			w.out = w.value.Unstructured()
+		}
+		return nil
+	}
 
@@ -118,7 +142,8 @@ func (w *removingWalker) doMap(t *schema.Map) ValidationErrors {
 		// but ignore them when we are removing (i.e. !w.shouldExtract)
 		if w.toRemove.Has(path) {
 			if w.shouldExtract {
-				newMap[k] = val.Unstructured()
+				newMap[k] = removeItemsWithSchema(val, w.toRemove, w.schema, fieldType, w.shouldExtract).Unstructured()
+
 			}
 			return true
 		}