Unverified commit c7a91e1c, authored by Derek Nola, committed by GitHub

Update to v1.20.15 (#4995)

* Backport https://github.com/k3s-io/k3s/pull/4861

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Update to v1.20.15

Signed-off-by: Derek Nola <derek.nola@suse.com>
parent 29b4bd8a
...@@ -42,7 +42,7 @@ ENV GO111MODULE off ...@@ -42,7 +42,7 @@ ENV GO111MODULE off
ENV DAPPER_RUN_ARGS --privileged -v k3s-cache:/go/src/github.com/rancher/k3s/.cache -v trivy-cache:/root/.cache/trivy ENV DAPPER_RUN_ARGS --privileged -v k3s-cache:/go/src/github.com/rancher/k3s/.cache -v trivy-cache:/root/.cache/trivy
ENV DAPPER_ENV REPO TAG DRONE_TAG IMAGE_NAME SKIP_VALIDATE GCLOUD_AUTH GITHUB_TOKEN GOLANG ENV DAPPER_ENV REPO TAG DRONE_TAG IMAGE_NAME SKIP_VALIDATE GCLOUD_AUTH GITHUB_TOKEN GOLANG
ENV DAPPER_SOURCE /go/src/github.com/rancher/k3s/ ENV DAPPER_SOURCE /go/src/github.com/rancher/k3s/
ENV DAPPER_OUTPUT ./bin ./dist ./build/out ENV DAPPER_OUTPUT ./bin ./dist ./build/out ./build/static ./pkg/static ./pkg/deploy ./pkg/data
ENV DAPPER_DOCKER_SOCKET true ENV DAPPER_DOCKER_SOCKET true
ENV HOME ${DAPPER_SOURCE} ENV HOME ${DAPPER_SOURCE}
ENV CROSS true ENV CROSS true
......
...@@ -22,11 +22,6 @@ release: ...@@ -22,11 +22,6 @@ release:
.PHONY: $(TARGETS) .PHONY: $(TARGETS)
.PHONY: generate
generate: build/data
./scripts/download
go generate
build/data: build/data:
mkdir -p $@ mkdir -p $@
......
...@@ -38,34 +38,34 @@ replace ( ...@@ -38,34 +38,34 @@ replace (
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
google.golang.org/grpc => google.golang.org/grpc v1.27.1 google.golang.org/grpc => google.golang.org/grpc v1.27.1
gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2 gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.20.14-k3s1 k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.20.15-k3s1
k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.20.14-k3s1 k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.20.15-k3s1
k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.14-k3s1 k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.15-k3s1
k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.20.14-k3s1 k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.20.15-k3s1
k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.20.14-k3s1 k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.20.15-k3s1
k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.20.14-k3s1 k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.20.15-k3s1
k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.20.14-k3s1 k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.20.15-k3s1
k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.20.14-k3s1 k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.20.15-k3s1
k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.20.14-k3s1 k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.20.15-k3s1
k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.20.14-k3s1 k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.20.15-k3s1
k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.20.14-k3s1 k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.20.15-k3s1
k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.20.14-k3s1 k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.20.15-k3s1
k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.20.14-k3s1 k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.20.15-k3s1
k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.20.14-k3s1 k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.20.15-k3s1
k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.20.14-k3s1 k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.20.15-k3s1
k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.20.14-k3s1 k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.20.15-k3s1
k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.20.14-k3s1 k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.20.15-k3s1
k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.20.14-k3s1 k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.20.15-k3s1
k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.20.14-k3s1 k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.20.15-k3s1
k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.20.14-k3s1 k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.20.15-k3s1
k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.20.14-k3s1 k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.20.15-k3s1
k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.20.14-k3s1 k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.20.15-k3s1
k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.20.14-k3s1 k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.20.15-k3s1
k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.20.14-k3s1 k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.20.15-k3s1
k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.20.14-k3s1 k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.20.15-k3s1
k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.20.14-k3s1 k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.20.15-k3s1
k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.20.14-k3s1 k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.20.15-k3s1
k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.20.14-k3s1 k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.20.15-k3s1
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34 mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
) )
...@@ -122,17 +122,17 @@ require ( ...@@ -122,17 +122,17 @@ require (
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 golang.org/x/sys v0.0.0-20210426230700-d19ff857e887
google.golang.org/grpc v1.33.2 google.golang.org/grpc v1.33.2
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.20.14 k8s.io/api v0.20.15
k8s.io/apimachinery v0.20.14 k8s.io/apimachinery v0.20.15
k8s.io/apiserver v0.20.14 k8s.io/apiserver v0.20.15
k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
k8s.io/cloud-provider v0.20.14 k8s.io/cloud-provider v0.20.15
k8s.io/component-base v0.20.14 k8s.io/component-base v0.20.15
k8s.io/controller-manager v0.20.14 k8s.io/controller-manager v0.20.15
k8s.io/cri-api v0.20.14 k8s.io/cri-api v0.20.15
k8s.io/klog v1.0.0 k8s.io/klog v1.0.0
k8s.io/kubectl v0.20.14 k8s.io/kubectl v0.20.15
k8s.io/kubernetes v1.20.14 k8s.io/kubernetes v1.20.15
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 k8s.io/utils v0.0.0-20201110183641-67b214c5f920
sigs.k8s.io/yaml v1.2.0 sigs.k8s.io/yaml v1.2.0
) )
...@@ -19,6 +19,7 @@ package admission ...@@ -19,6 +19,7 @@ package admission
import ( import (
"context" "context"
"fmt" "fmt"
"sync"
auditinternal "k8s.io/apiserver/pkg/apis/audit" auditinternal "k8s.io/apiserver/pkg/apis/audit"
"k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/audit"
...@@ -27,7 +28,10 @@ import ( ...@@ -27,7 +28,10 @@ import (
// auditHandler logs annotations set by other admission handlers // auditHandler logs annotations set by other admission handlers
type auditHandler struct { type auditHandler struct {
Interface Interface
ae *auditinternal.Event // TODO: move the lock near the Annotations field of the audit event so it is always protected from concurrent access.
// to protect the 'Annotations' map of the audit event from concurrent writes
mutex sync.Mutex
ae *auditinternal.Event
} }
var _ Interface = &auditHandler{} var _ Interface = &auditHandler{}
...@@ -42,10 +46,10 @@ func WithAudit(i Interface, ae *auditinternal.Event) Interface { ...@@ -42,10 +46,10 @@ func WithAudit(i Interface, ae *auditinternal.Event) Interface {
if i == nil { if i == nil {
return i return i
} }
return &auditHandler{i, ae} return &auditHandler{Interface: i, ae: ae}
} }
func (handler auditHandler) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error { func (handler *auditHandler) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error {
if !handler.Interface.Handles(a.GetOperation()) { if !handler.Interface.Handles(a.GetOperation()) {
return nil return nil
} }
...@@ -60,7 +64,7 @@ func (handler auditHandler) Admit(ctx context.Context, a Attributes, o ObjectInt ...@@ -60,7 +64,7 @@ func (handler auditHandler) Admit(ctx context.Context, a Attributes, o ObjectInt
return err return err
} }
func (handler auditHandler) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error { func (handler *auditHandler) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error {
if !handler.Interface.Handles(a.GetOperation()) { if !handler.Interface.Handles(a.GetOperation()) {
return nil return nil
} }
...@@ -84,10 +88,13 @@ func ensureAnnotationGetter(a Attributes) error { ...@@ -84,10 +88,13 @@ func ensureAnnotationGetter(a Attributes) error {
return fmt.Errorf("attributes must be an instance of privateAnnotationsGetter or AnnotationsGetter") return fmt.Errorf("attributes must be an instance of privateAnnotationsGetter or AnnotationsGetter")
} }
func (handler auditHandler) logAnnotations(a Attributes) { func (handler *auditHandler) logAnnotations(a Attributes) {
if handler.ae == nil { if handler.ae == nil {
return return
} }
handler.mutex.Lock()
defer handler.mutex.Unlock()
switch a := a.(type) { switch a := a.(type) {
case privateAnnotationsGetter: case privateAnnotationsGetter:
for key, value := range a.getAnnotations(handler.ae.Level) { for key, value := range a.getAnnotations(handler.ae.Level) {
......
...@@ -146,7 +146,7 @@ type timeoutWriter interface { ...@@ -146,7 +146,7 @@ type timeoutWriter interface {
} }
func newTimeoutWriter(w http.ResponseWriter) timeoutWriter { func newTimeoutWriter(w http.ResponseWriter) timeoutWriter {
base := &baseTimeoutWriter{w: w} base := &baseTimeoutWriter{w: w, handlerHeaders: w.Header().Clone()}
_, notifiable := w.(http.CloseNotifier) _, notifiable := w.(http.CloseNotifier)
_, hijackable := w.(http.Hijacker) _, hijackable := w.(http.Hijacker)
...@@ -166,6 +166,9 @@ func newTimeoutWriter(w http.ResponseWriter) timeoutWriter { ...@@ -166,6 +166,9 @@ func newTimeoutWriter(w http.ResponseWriter) timeoutWriter {
type baseTimeoutWriter struct { type baseTimeoutWriter struct {
w http.ResponseWriter w http.ResponseWriter
// headers written by the normal handler
handlerHeaders http.Header
mu sync.Mutex mu sync.Mutex
// if the timeout handler has timeout // if the timeout handler has timeout
timedOut bool timedOut bool
...@@ -183,7 +186,7 @@ func (tw *baseTimeoutWriter) Header() http.Header { ...@@ -183,7 +186,7 @@ func (tw *baseTimeoutWriter) Header() http.Header {
return http.Header{} return http.Header{}
} }
return tw.w.Header() return tw.handlerHeaders
} }
func (tw *baseTimeoutWriter) Write(p []byte) (int, error) { func (tw *baseTimeoutWriter) Write(p []byte) (int, error) {
...@@ -197,7 +200,10 @@ func (tw *baseTimeoutWriter) Write(p []byte) (int, error) { ...@@ -197,7 +200,10 @@ func (tw *baseTimeoutWriter) Write(p []byte) (int, error) {
return 0, http.ErrHijacked return 0, http.ErrHijacked
} }
tw.wroteHeader = true if !tw.wroteHeader {
copyHeaders(tw.w.Header(), tw.handlerHeaders)
tw.wroteHeader = true
}
return tw.w.Write(p) return tw.w.Write(p)
} }
...@@ -222,10 +228,17 @@ func (tw *baseTimeoutWriter) WriteHeader(code int) { ...@@ -222,10 +228,17 @@ func (tw *baseTimeoutWriter) WriteHeader(code int) {
return return
} }
copyHeaders(tw.w.Header(), tw.handlerHeaders)
tw.wroteHeader = true tw.wroteHeader = true
tw.w.WriteHeader(code) tw.w.WriteHeader(code)
} }
func copyHeaders(dst, src http.Header) {
for k, v := range src {
dst[k] = v
}
}
func (tw *baseTimeoutWriter) timeout(err *apierrors.StatusError) { func (tw *baseTimeoutWriter) timeout(err *apierrors.StatusError) {
tw.mu.Lock() tw.mu.Lock()
defer tw.mu.Unlock() defer tw.mu.Unlock()
......
...@@ -3,8 +3,8 @@ package version ...@@ -3,8 +3,8 @@ package version
var ( var (
gitMajor = "1" gitMajor = "1"
gitMinor = "20" gitMinor = "20"
gitVersion = "v1.20.14-k3s1" gitVersion = "v1.20.15-k3s1"
gitCommit = "c9ccd1687e39f0ae9ff460dbeed44d8a528d8f47" gitCommit = "5312135011ce191fcf1bd1aa6099f2d3d690f57e"
gitTreeState = "clean" gitTreeState = "clean"
buildDate = "2021-12-15T21:14:12Z" buildDate = "2022-01-19T23:16:01Z"
) )
...@@ -149,7 +149,10 @@ func (q *Type) Get() (item interface{}, shutdown bool) { ...@@ -149,7 +149,10 @@ func (q *Type) Get() (item interface{}, shutdown bool) {
return nil, true return nil, true
} }
item, q.queue = q.queue[0], q.queue[1:] item = q.queue[0]
// The underlying array still exists and reference this object, so the object will not be garbage collected.
q.queue[0] = nil
q.queue = q.queue[1:]
q.metrics.get(item) q.metrics.get(item)
......
...@@ -3,8 +3,8 @@ package version ...@@ -3,8 +3,8 @@ package version
var ( var (
gitMajor = "1" gitMajor = "1"
gitMinor = "20" gitMinor = "20"
gitVersion = "v1.20.14-k3s1" gitVersion = "v1.20.15-k3s1"
gitCommit = "c9ccd1687e39f0ae9ff460dbeed44d8a528d8f47" gitCommit = "5312135011ce191fcf1bd1aa6099f2d3d690f57e"
gitTreeState = "clean" gitTreeState = "clean"
buildDate = "2021-12-15T21:14:12Z" buildDate = "2022-01-19T23:16:01Z"
) )
...@@ -90,6 +90,9 @@ const ( ...@@ -90,6 +90,9 @@ const (
DefaultDummyDevice = "kube-ipvs0" DefaultDummyDevice = "kube-ipvs0"
connReuseMinSupportedKernelVersion = "4.1" connReuseMinSupportedKernelVersion = "4.1"
// https://github.com/torvalds/linux/commit/35dfb013149f74c2be1ff9c78f14e6a3cd1539d1
connReuseFixedKernelVersion = "5.9"
) )
// iptablesJumpChain is tables of iptables chains that ipvs proxier used to install iptables or cleanup iptables. // iptablesJumpChain is tables of iptables chains that ipvs proxier used to install iptables or cleanup iptables.
...@@ -387,6 +390,9 @@ func NewProxier(ipt utiliptables.Interface, ...@@ -387,6 +390,9 @@ func NewProxier(ipt utiliptables.Interface,
} }
if kernelVersion.LessThan(version.MustParseGeneric(connReuseMinSupportedKernelVersion)) { if kernelVersion.LessThan(version.MustParseGeneric(connReuseMinSupportedKernelVersion)) {
klog.Errorf("can't set sysctl %s, kernel version must be at least %s", sysctlConnReuse, connReuseMinSupportedKernelVersion) klog.Errorf("can't set sysctl %s, kernel version must be at least %s", sysctlConnReuse, connReuseMinSupportedKernelVersion)
} else if kernelVersion.AtLeast(version.MustParseGeneric(connReuseFixedKernelVersion)) {
// https://github.com/kubernetes/kubernetes/issues/93297
klog.V(2).InfoS("Left as-is", "sysctl", sysctlConnReuse)
} else { } else {
// Set the connection reuse mode // Set the connection reuse mode
if err := utilproxy.EnsureSysctl(sysctl, sysctlConnReuse, 0); err != nil { if err := utilproxy.EnsureSysctl(sysctl, sysctlConnReuse, 0); err != nil {
......
...@@ -545,6 +545,7 @@ func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod ...@@ -545,6 +545,7 @@ func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
if err != nil { if err != nil {
klog.Errorf("Error getting label selectors for pod: %v.", up.Name) klog.Errorf("Error getting label selectors for pod: %v.", up.Name)
continue
} }
if util.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) { if util.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) {
podsToMove = append(podsToMove, pInfo) podsToMove = append(podsToMove, pInfo)
......
...@@ -38,6 +38,9 @@ func GetNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffini ...@@ -38,6 +38,9 @@ func GetNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffini
// PodMatchesTermsNamespaceAndSelector returns true if the given <pod> // PodMatchesTermsNamespaceAndSelector returns true if the given <pod>
// matches the namespace and selector defined by <affinityPod>`s <term>. // matches the namespace and selector defined by <affinityPod>`s <term>.
func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, namespaces sets.String, selector labels.Selector) bool { func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, namespaces sets.String, selector labels.Selector) bool {
if selector == nil {
return false
}
if !namespaces.Has(pod.Namespace) { if !namespaces.Has(pod.Namespace) {
return false return false
} }
......
...@@ -98,6 +98,14 @@ const ( ...@@ -98,6 +98,14 @@ const (
// LabelFailureDomainBetaRegion failure-domain region label // LabelFailureDomainBetaRegion failure-domain region label
LabelFailureDomainBetaRegion = "failure-domain.beta.kubernetes.io/region" LabelFailureDomainBetaRegion = "failure-domain.beta.kubernetes.io/region"
// LabelNodeExcludeBalancers specifies that the node should not be considered as a target
// for external load-balancers which use nodes as a second hop (e.g. many cloud LBs which only
// understand nodes). For services that use externalTrafficPolicy=Local, this may mean that
// any backends on excluded nodes are not reachable by those external load-balancers.
// Implementations of this exclusion may vary based on provider. This label is honored starting
// in 1.16 when the ServiceNodeExclusion gate is on.
LabelNodeExcludeBalancers = "node.kubernetes.io/exclude-from-external-load-balancers"
) )
const ( const (
...@@ -281,6 +289,8 @@ type Cloud struct { ...@@ -281,6 +289,8 @@ type Cloud struct {
nodeResourceGroups map[string]string nodeResourceGroups map[string]string
// unmanagedNodes holds a list of nodes not managed by Azure cloud provider. // unmanagedNodes holds a list of nodes not managed by Azure cloud provider.
unmanagedNodes sets.String unmanagedNodes sets.String
// excludeLoadBalancerNodes holds a list of nodes that should be excluded from LoadBalancer.
excludeLoadBalancerNodes sets.String
// nodeInformerSynced is for determining if the informer has synced. // nodeInformerSynced is for determining if the informer has synced.
nodeInformerSynced cache.InformerSynced nodeInformerSynced cache.InformerSynced
...@@ -342,10 +352,11 @@ func NewCloudWithoutFeatureGates(configReader io.Reader) (*Cloud, error) { ...@@ -342,10 +352,11 @@ func NewCloudWithoutFeatureGates(configReader io.Reader) (*Cloud, error) {
} }
az := &Cloud{ az := &Cloud{
nodeZones: map[string]sets.String{}, nodeZones: map[string]sets.String{},
nodeResourceGroups: map[string]string{}, nodeResourceGroups: map[string]string{},
unmanagedNodes: sets.NewString(), unmanagedNodes: sets.NewString(),
routeCIDRs: map[string]string{}, excludeLoadBalancerNodes: sets.NewString(),
routeCIDRs: map[string]string{},
} }
err = az.InitializeCloudFromConfig(config, false) err = az.InitializeCloudFromConfig(config, false)
...@@ -748,8 +759,8 @@ func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) { ...@@ -748,8 +759,8 @@ func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
UpdateFunc: func(prev, obj interface{}) { UpdateFunc: func(prev, obj interface{}) {
prevNode := prev.(*v1.Node) prevNode := prev.(*v1.Node)
newNode := obj.(*v1.Node) newNode := obj.(*v1.Node)
if newNode.Labels[LabelFailureDomainBetaZone] == if newNode.Labels[v1.LabelTopologyZone] ==
prevNode.Labels[LabelFailureDomainBetaZone] { prevNode.Labels[v1.LabelTopologyZone] {
return return
} }
az.updateNodeCaches(prevNode, newNode) az.updateNodeCaches(prevNode, newNode)
...@@ -801,6 +812,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { ...@@ -801,6 +812,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
managed, ok := prevNode.ObjectMeta.Labels[managedByAzureLabel] managed, ok := prevNode.ObjectMeta.Labels[managedByAzureLabel]
if ok && managed == "false" { if ok && managed == "false" {
az.unmanagedNodes.Delete(prevNode.ObjectMeta.Name) az.unmanagedNodes.Delete(prevNode.ObjectMeta.Name)
az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name)
}
// Remove from excludeLoadBalancerNodes cache.
if _, hasExcludeBalancerLabel := prevNode.ObjectMeta.Labels[LabelNodeExcludeBalancers]; hasExcludeBalancerLabel {
az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name)
} }
} }
...@@ -824,6 +841,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { ...@@ -824,6 +841,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
managed, ok := newNode.ObjectMeta.Labels[managedByAzureLabel] managed, ok := newNode.ObjectMeta.Labels[managedByAzureLabel]
if ok && managed == "false" { if ok && managed == "false" {
az.unmanagedNodes.Insert(newNode.ObjectMeta.Name) az.unmanagedNodes.Insert(newNode.ObjectMeta.Name)
az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name)
}
// Add to excludeLoadBalancerNodes cache.
if _, hasExcludeBalancerLabel := newNode.ObjectMeta.Labels[LabelNodeExcludeBalancers]; hasExcludeBalancerLabel {
az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name)
} }
} }
} }
...@@ -913,16 +936,23 @@ func (az *Cloud) GetUnmanagedNodes() (sets.String, error) { ...@@ -913,16 +936,23 @@ func (az *Cloud) GetUnmanagedNodes() (sets.String, error) {
return sets.NewString(az.unmanagedNodes.List()...), nil return sets.NewString(az.unmanagedNodes.List()...), nil
} }
// ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged or in external resource group. // ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged, in external resource group or labeled with "node.kubernetes.io/exclude-from-external-load-balancers".
func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(node *v1.Node) bool { func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(nodeName string) (bool, error) {
labels := node.ObjectMeta.Labels // Kubelet won't set az.nodeInformerSynced, always return nil.
if rg, ok := labels[externalResourceGroupLabel]; ok && !strings.EqualFold(rg, az.ResourceGroup) { if az.nodeInformerSynced == nil {
return true return false, nil
}
az.nodeCachesLock.RLock()
defer az.nodeCachesLock.RUnlock()
if !az.nodeInformerSynced() {
return false, fmt.Errorf("node informer is not synced when trying to fetch node caches")
} }
if managed, ok := labels[managedByAzureLabel]; ok && managed == "false" { // Return true if the node is in external resource group.
return true if cachedRG, ok := az.nodeResourceGroups[nodeName]; ok && !strings.EqualFold(cachedRG, az.ResourceGroup) {
return true, nil
} }
return false return az.excludeLoadBalancerNodes.Has(nodeName), nil
} }
...@@ -65,12 +65,13 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) { ...@@ -65,12 +65,13 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) {
MaximumLoadBalancerRuleCount: 250, MaximumLoadBalancerRuleCount: 250,
VMType: vmTypeStandard, VMType: vmTypeStandard,
}, },
nodeZones: map[string]sets.String{}, nodeZones: map[string]sets.String{},
nodeInformerSynced: func() bool { return true }, nodeInformerSynced: func() bool { return true },
nodeResourceGroups: map[string]string{}, nodeResourceGroups: map[string]string{},
unmanagedNodes: sets.NewString(), unmanagedNodes: sets.NewString(),
routeCIDRs: map[string]string{}, excludeLoadBalancerNodes: sets.NewString(),
eventRecorder: &record.FakeRecorder{}, routeCIDRs: map[string]string{},
eventRecorder: &record.FakeRecorder{},
} }
az.DisksClient = mockdiskclient.NewMockInterface(ctrl) az.DisksClient = mockdiskclient.NewMockInterface(ctrl)
az.InterfacesClient = mockinterfaceclient.NewMockInterface(ctrl) az.InterfacesClient = mockinterfaceclient.NewMockInterface(ctrl)
......
...@@ -343,7 +343,7 @@ func (az *Cloud) cleanBackendpoolForPrimarySLB(primarySLB *network.LoadBalancer, ...@@ -343,7 +343,7 @@ func (az *Cloud) cleanBackendpoolForPrimarySLB(primarySLB *network.LoadBalancer,
}, },
} }
// decouple the backendPool from the node // decouple the backendPool from the node
err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted) err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, true)
if err != nil { if err != nil {
return nil, err return nil, err
} }
...@@ -1083,15 +1083,6 @@ func (az *Cloud) findFrontendIPConfigOfService( ...@@ -1083,15 +1083,6 @@ func (az *Cloud) findFrontendIPConfigOfService(
return nil, false, nil return nil, false, nil
} }
func nodeNameInNodes(nodeName string, nodes []*v1.Node) bool {
for _, node := range nodes {
if strings.EqualFold(nodeName, node.Name) {
return true
}
}
return false
}
// reconcileLoadBalancer ensures load balancer exists and the frontend ip config is setup. // reconcileLoadBalancer ensures load balancer exists and the frontend ip config is setup.
// This also reconciles the Service's Ports with the LoadBalancer config. // This also reconciles the Service's Ports with the LoadBalancer config.
// This entails adding rules/probes for expected Ports and removing stale rules/ports. // This entails adding rules/probes for expected Ports and removing stale rules/ports.
...@@ -1147,7 +1138,12 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, ...@@ -1147,7 +1138,12 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
// would not be in the `nodes` slice. We need to check the nodes that // would not be in the `nodes` slice. We need to check the nodes that
// have been added to the LB's backendpool, find the unwanted ones and // have been added to the LB's backendpool, find the unwanted ones and
// delete them from the pool. // delete them from the pool.
if !nodeNameInNodes(nodeName, nodes) { shouldExcludeLoadBalancer, err := az.ShouldNodeExcludedFromLoadBalancer(nodeName)
if err != nil {
klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", nodeName, err)
return nil, err
}
if shouldExcludeLoadBalancer {
klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found unwanted node %s, decouple it from the LB", serviceName, wantLb, nodeName) klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found unwanted node %s, decouple it from the LB", serviceName, wantLb, nodeName)
// construct a backendPool that only contains the IP config of the node to be deleted // construct a backendPool that only contains the IP config of the node to be deleted
backendIPConfigurationsToBeDeleted = append(backendIPConfigurationsToBeDeleted, network.InterfaceIPConfiguration{ID: to.StringPtr(ipConfID)}) backendIPConfigurationsToBeDeleted = append(backendIPConfigurationsToBeDeleted, network.InterfaceIPConfiguration{ID: to.StringPtr(ipConfID)})
...@@ -1164,7 +1160,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, ...@@ -1164,7 +1160,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
} }
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
// decouple the backendPool from the node // decouple the backendPool from the node
err = az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted) err = az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
...@@ -1448,21 +1444,36 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, ...@@ -1448,21 +1444,36 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
// do nothing for availability set // do nothing for availability set
lb.BackendAddressPools = nil lb.BackendAddressPools = nil
} }
err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools) err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools, true)
if err != nil { if err != nil {
klog.Errorf("EnsureBackendPoolDeleted(%s) for service %s failed: %v", lbBackendPoolID, serviceName, err) klog.Errorf("EnsureBackendPoolDeleted(%s) for service %s failed: %v", lbBackendPoolID, serviceName, err)
return nil, err return nil, err
} }
klog.V(10).Infof("EnsureBackendPoolDeleted(%s) for service %s: end", lbBackendPoolID, serviceName) klog.V(10).Infof("EnsureBackendPoolDeleted(%s) for service %s: end", lbBackendPoolID, serviceName)
// Remove the LB. existingLBs, err := az.ListLB(service)
klog.V(10).Infof("reconcileLoadBalancer: az.DeleteLB(%q): start", lbName)
err = az.DeleteLB(service, lbName)
if err != nil { if err != nil {
klog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName) klog.Errorf("reconcileLoadBalancer: failed to list load balancer for service %q: %s", serviceName, err.Error())
return nil, err return nil, err
} }
klog.V(10).Infof("az.DeleteLB(%q): end", lbName)
foundLB := false
for _, existingLB := range existingLBs {
if strings.EqualFold(lbName, to.String(existingLB.Name)) {
foundLB = true
break
}
}
// Remove the LB if it exists.
if foundLB {
klog.V(10).Infof("reconcileLoadBalancer: az.DeleteLB(%q): start", lbName)
err = az.DeleteLB(service, lbName)
if err != nil {
klog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName)
return nil, err
}
klog.V(10).Infof("az.DeleteLB(%q): end", lbName)
}
} }
} else { } else {
klog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName) klog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName)
...@@ -1665,7 +1676,8 @@ func (az *Cloud) reconcileLoadBalancerRule( ...@@ -1665,7 +1676,8 @@ func (az *Cloud) reconcileLoadBalancerRule(
if probeProtocol == "" { if probeProtocol == "" {
probeProtocol = string(network.ProbeProtocolHTTP) probeProtocol = string(network.ProbeProtocolHTTP)
} }
if requestPath == "" { needRequestPath := strings.EqualFold(probeProtocol, string(network.ProbeProtocolHTTP)) || strings.EqualFold(probeProtocol, string(network.ProbeProtocolHTTPS))
if requestPath == "" && needRequestPath {
requestPath = podPresencePath requestPath = podPresencePath
} }
......
...@@ -914,7 +914,12 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No ...@@ -914,7 +914,12 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No
continue continue
} }
if as.ShouldNodeExcludedFromLoadBalancer(node) { shouldExcludeLoadBalancer, err := as.ShouldNodeExcludedFromLoadBalancer(localNodeName)
if err != nil {
klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", localNodeName, err)
return err
}
if shouldExcludeLoadBalancer {
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName) klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
continue continue
} }
...@@ -939,7 +944,7 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No ...@@ -939,7 +944,7 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No
} }
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes. // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
// Returns nil if backend address pools already deleted. // Returns nil if backend address pools already deleted.
if backendAddressPools == nil { if backendAddressPools == nil {
return nil return nil
......
...@@ -64,7 +64,7 @@ type VMSet interface { ...@@ -64,7 +64,7 @@ type VMSet interface {
// participating in the specified LoadBalancer Backend Pool. // participating in the specified LoadBalancer Backend Pool.
EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) (string, string, string, *compute.VirtualMachineScaleSetVM, error) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) (string, string, string, *compute.VirtualMachineScaleSetVM, error)
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes. // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error
// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun. // AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string, writeAcceleratorEnabled bool) error AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string, writeAcceleratorEnabled bool) error
......
...@@ -743,11 +743,16 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) { ...@@ -743,11 +743,16 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
continue continue
} }
if ss.ShouldNodeExcludedFromLoadBalancer(nodes[nx]) { nodeName := nodes[nx].Name
shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(nodeName)
if err != nil {
klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", nodeName, err)
return nil, err
}
if shouldExcludeLoadBalancer {
continue continue
} }
nodeName := nodes[nx].Name
ssName, _, _, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault) ssName, _, _, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault)
if err != nil { if err != nil {
return nil, err return nil, err
...@@ -1105,7 +1110,12 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back ...@@ -1105,7 +1110,12 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
continue continue
} }
if ss.ShouldNodeExcludedFromLoadBalancer(node) { shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(node.Name)
if err != nil {
klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", node.Name, err)
return err
}
if shouldExcludeLoadBalancer {
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", node.Name) klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", node.Name)
continue continue
} }
...@@ -1248,7 +1258,12 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac ...@@ -1248,7 +1258,12 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
continue continue
} }
if ss.ShouldNodeExcludedFromLoadBalancer(node) { shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(localNodeName)
if err != nil {
klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", localNodeName, err)
return err
}
if shouldExcludeLoadBalancer {
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName) klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
continue continue
} }
...@@ -1569,7 +1584,7 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen ...@@ -1569,7 +1584,7 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
} }
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes. // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
// Returns nil if backend address pools already deleted. // Returns nil if backend address pools already deleted.
if backendAddressPools == nil { if backendAddressPools == nil {
return nil return nil
...@@ -1681,9 +1696,11 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, ...@@ -1681,9 +1696,11 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,
} }
// Ensure the backendPoolID is also deleted on VMSS itself. // Ensure the backendPoolID is also deleted on VMSS itself.
err := ss.ensureBackendPoolDeletedFromVMSS(service, backendPoolID, vmSetName, ipConfigurationIDs) if deleteFromVMSet {
if err != nil { err := ss.ensureBackendPoolDeletedFromVMSS(service, backendPoolID, vmSetName, ipConfigurationIDs)
return err if err != nil {
return err
}
} }
isOperationSucceeded = true isOperationSucceeded = true
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment