Unverified Commit c7a91e1c authored by Derek Nola, committed by GitHub

Update to v1.20.15 (#4995)

* Backport https://github.com/k3s-io/k3s/pull/4861
  Signed-off-by: Derek Nola <derek.nola@suse.com>
* Update to v1.20.15
  Signed-off-by: Derek Nola <derek.nola@suse.com>
parent 29b4bd8a
......@@ -42,7 +42,7 @@ ENV GO111MODULE off
ENV DAPPER_RUN_ARGS --privileged -v k3s-cache:/go/src/github.com/rancher/k3s/.cache -v trivy-cache:/root/.cache/trivy
ENV DAPPER_ENV REPO TAG DRONE_TAG IMAGE_NAME SKIP_VALIDATE GCLOUD_AUTH GITHUB_TOKEN GOLANG
ENV DAPPER_SOURCE /go/src/github.com/rancher/k3s/
ENV DAPPER_OUTPUT ./bin ./dist ./build/out
ENV DAPPER_OUTPUT ./bin ./dist ./build/out ./build/static ./pkg/static ./pkg/deploy ./pkg/data
ENV DAPPER_DOCKER_SOCKET true
ENV HOME ${DAPPER_SOURCE}
ENV CROSS true
......
......@@ -22,11 +22,6 @@ release:
.PHONY: $(TARGETS)
.PHONY: generate
generate: build/data
./scripts/download
go generate
build/data:
mkdir -p $@
......
......@@ -38,34 +38,34 @@ replace (
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
google.golang.org/grpc => google.golang.org/grpc v1.27.1
gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.20.14-k3s1
k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.20.14-k3s1
k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.14-k3s1
k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.20.14-k3s1
k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.20.14-k3s1
k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.20.14-k3s1
k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.20.14-k3s1
k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.20.14-k3s1
k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.20.14-k3s1
k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.20.14-k3s1
k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.20.14-k3s1
k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.20.14-k3s1
k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.20.14-k3s1
k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.20.14-k3s1
k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.20.14-k3s1
k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.20.14-k3s1
k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.20.14-k3s1
k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.20.14-k3s1
k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.20.14-k3s1
k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.20.14-k3s1
k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.20.14-k3s1
k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.20.14-k3s1
k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.20.14-k3s1
k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.20.14-k3s1
k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.20.14-k3s1
k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.20.14-k3s1
k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.20.14-k3s1
k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.20.14-k3s1
k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.20.15-k3s1
k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.20.15-k3s1
k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.15-k3s1
k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.20.15-k3s1
k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.20.15-k3s1
k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.20.15-k3s1
k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.20.15-k3s1
k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.20.15-k3s1
k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.20.15-k3s1
k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.20.15-k3s1
k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.20.15-k3s1
k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.20.15-k3s1
k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.20.15-k3s1
k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.20.15-k3s1
k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.20.15-k3s1
k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.20.15-k3s1
k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.20.15-k3s1
k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.20.15-k3s1
k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.20.15-k3s1
k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.20.15-k3s1
k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.20.15-k3s1
k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.20.15-k3s1
k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.20.15-k3s1
k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.20.15-k3s1
k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.20.15-k3s1
k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.20.15-k3s1
k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.20.15-k3s1
k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.20.15-k3s1
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
)
......@@ -122,17 +122,17 @@ require (
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887
google.golang.org/grpc v1.33.2
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.20.14
k8s.io/apimachinery v0.20.14
k8s.io/apiserver v0.20.14
k8s.io/api v0.20.15
k8s.io/apimachinery v0.20.15
k8s.io/apiserver v0.20.15
k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
k8s.io/cloud-provider v0.20.14
k8s.io/component-base v0.20.14
k8s.io/controller-manager v0.20.14
k8s.io/cri-api v0.20.14
k8s.io/cloud-provider v0.20.15
k8s.io/component-base v0.20.15
k8s.io/controller-manager v0.20.15
k8s.io/cri-api v0.20.15
k8s.io/klog v1.0.0
k8s.io/kubectl v0.20.14
k8s.io/kubernetes v1.20.14
k8s.io/kubectl v0.20.15
k8s.io/kubernetes v1.20.15
k8s.io/utils v0.0.0-20201110183641-67b214c5f920
sigs.k8s.io/yaml v1.2.0
)
......@@ -19,6 +19,7 @@ package admission
import (
"context"
"fmt"
"sync"
auditinternal "k8s.io/apiserver/pkg/apis/audit"
"k8s.io/apiserver/pkg/audit"
......@@ -27,6 +28,9 @@ import (
// auditHandler logs annotations set by other admission handlers
type auditHandler struct {
Interface
// TODO: move the lock near the Annotations field of the audit event so it is always protected from concurrent access.
// to protect the 'Annotations' map of the audit event from concurrent writes
mutex sync.Mutex
ae *auditinternal.Event
}
......@@ -42,10 +46,10 @@ func WithAudit(i Interface, ae *auditinternal.Event) Interface {
if i == nil {
return i
}
return &auditHandler{i, ae}
return &auditHandler{Interface: i, ae: ae}
}
func (handler auditHandler) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error {
func (handler *auditHandler) Admit(ctx context.Context, a Attributes, o ObjectInterfaces) error {
if !handler.Interface.Handles(a.GetOperation()) {
return nil
}
......@@ -60,7 +64,7 @@ func (handler auditHandler) Admit(ctx context.Context, a Attributes, o ObjectInt
return err
}
func (handler auditHandler) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error {
func (handler *auditHandler) Validate(ctx context.Context, a Attributes, o ObjectInterfaces) error {
if !handler.Interface.Handles(a.GetOperation()) {
return nil
}
......@@ -84,10 +88,13 @@ func ensureAnnotationGetter(a Attributes) error {
return fmt.Errorf("attributes must be an instance of privateAnnotationsGetter or AnnotationsGetter")
}
func (handler auditHandler) logAnnotations(a Attributes) {
func (handler *auditHandler) logAnnotations(a Attributes) {
if handler.ae == nil {
return
}
handler.mutex.Lock()
defer handler.mutex.Unlock()
switch a := a.(type) {
case privateAnnotationsGetter:
for key, value := range a.getAnnotations(handler.ae.Level) {
......
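The audit.go change has two coupled parts: a sync.Mutex now guards the shared audit event's Annotations map, and the handler methods switch from value receivers to pointer receivers so every call locks the same mutex rather than a per-call copy of it. A minimal sketch of that pattern, with illustrative names rather than the upstream types:

```go
package sketch

import "sync"

// event stands in for the shared audit event whose Annotations map can be
// written from concurrent admission calls (illustrative, not the real type).
type event struct {
	Annotations map[string]string
}

type auditingHandler struct {
	mu sync.Mutex // guards ev.Annotations against concurrent writes
	ev *event
}

// annotate uses a pointer receiver on purpose: a value receiver would copy the
// struct, and with it the mutex, so concurrent callers would each lock a
// different copy instead of one shared lock.
func (h *auditingHandler) annotate(key, value string) {
	h.mu.Lock()
	defer h.mu.Unlock()
	if h.ev.Annotations == nil {
		h.ev.Annotations = map[string]string{}
	}
	h.ev.Annotations[key] = value
}
```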
......@@ -146,7 +146,7 @@ type timeoutWriter interface {
}
func newTimeoutWriter(w http.ResponseWriter) timeoutWriter {
base := &baseTimeoutWriter{w: w}
base := &baseTimeoutWriter{w: w, handlerHeaders: w.Header().Clone()}
_, notifiable := w.(http.CloseNotifier)
_, hijackable := w.(http.Hijacker)
......@@ -166,6 +166,9 @@ func newTimeoutWriter(w http.ResponseWriter) timeoutWriter {
type baseTimeoutWriter struct {
w http.ResponseWriter
// headers written by the normal handler
handlerHeaders http.Header
mu sync.Mutex
// if the timeout handler has timeout
timedOut bool
......@@ -183,7 +186,7 @@ func (tw *baseTimeoutWriter) Header() http.Header {
return http.Header{}
}
return tw.w.Header()
return tw.handlerHeaders
}
func (tw *baseTimeoutWriter) Write(p []byte) (int, error) {
......@@ -197,7 +200,10 @@ func (tw *baseTimeoutWriter) Write(p []byte) (int, error) {
return 0, http.ErrHijacked
}
if !tw.wroteHeader {
copyHeaders(tw.w.Header(), tw.handlerHeaders)
tw.wroteHeader = true
}
return tw.w.Write(p)
}
......@@ -222,10 +228,17 @@ func (tw *baseTimeoutWriter) WriteHeader(code int) {
return
}
copyHeaders(tw.w.Header(), tw.handlerHeaders)
tw.wroteHeader = true
tw.w.WriteHeader(code)
}
func copyHeaders(dst, src http.Header) {
for k, v := range src {
dst[k] = v
}
}
func (tw *baseTimeoutWriter) timeout(err *apierrors.StatusError) {
tw.mu.Lock()
defer tw.mu.Unlock()
......
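The timeout handler fix gives the wrapped handler its own header map (cloned from the real writer's headers) and only copies it into the underlying ResponseWriter, under the mutex, when the response is actually committed. A handler that keeps setting headers after a timeout therefore no longer races with the timeout error response. A condensed sketch of that buffering pattern, assuming nothing beyond net/http (not the upstream baseTimeoutWriter):

```go
package sketch

import (
	"net/http"
	"sync"
)

// bufferedHeaderWriter lets the handler mutate handlerHeaders via Header();
// the buffered headers reach the real writer only once, at write time.
type bufferedHeaderWriter struct {
	mu             sync.Mutex
	w              http.ResponseWriter
	handlerHeaders http.Header
	wroteHeader    bool
}

func newBufferedHeaderWriter(w http.ResponseWriter) *bufferedHeaderWriter {
	return &bufferedHeaderWriter{w: w, handlerHeaders: w.Header().Clone()}
}

func (bw *bufferedHeaderWriter) Header() http.Header { return bw.handlerHeaders }

// flush copies the buffered headers into the real writer; callers hold bw.mu.
func (bw *bufferedHeaderWriter) flush() {
	if bw.wroteHeader {
		return
	}
	for k, v := range bw.handlerHeaders {
		bw.w.Header()[k] = v
	}
	bw.wroteHeader = true
}

func (bw *bufferedHeaderWriter) WriteHeader(code int) {
	bw.mu.Lock()
	defer bw.mu.Unlock()
	bw.flush()
	bw.w.WriteHeader(code)
}

func (bw *bufferedHeaderWriter) Write(p []byte) (int, error) {
	bw.mu.Lock()
	defer bw.mu.Unlock()
	bw.flush()
	return bw.w.Write(p)
}
```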
......@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "20"
gitVersion = "v1.20.14-k3s1"
gitCommit = "c9ccd1687e39f0ae9ff460dbeed44d8a528d8f47"
gitVersion = "v1.20.15-k3s1"
gitCommit = "5312135011ce191fcf1bd1aa6099f2d3d690f57e"
gitTreeState = "clean"
buildDate = "2021-12-15T21:14:12Z"
buildDate = "2022-01-19T23:16:01Z"
)
......@@ -149,7 +149,10 @@ func (q *Type) Get() (item interface{}, shutdown bool) {
return nil, true
}
item, q.queue = q.queue[0], q.queue[1:]
item = q.queue[0]
// The underlying array still exists and still references this object, so the object will not be garbage collected.
q.queue[0] = nil
q.queue = q.queue[1:]
q.metrics.get(item)
......
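The small workqueue change is about memory, not correctness: `q.queue = q.queue[1:]` re-slices but keeps the same backing array, and as long as slot 0 of that array still references the dequeued item the garbage collector cannot reclaim it. Clearing the slot first breaks that reference. A tiny standalone illustration of the same dequeue, not the upstream queue type:

```go
package sketch

// pop removes the head of a slice-backed FIFO without pinning the popped
// element: the backing array outlives the re-slice, so the slot is cleared
// before the slice header is advanced.
func pop(queue []interface{}) (item interface{}, rest []interface{}) {
	item = queue[0]
	queue[0] = nil // drop the array's reference so item can be garbage collected
	return item, queue[1:]
}
```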
......@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "20"
gitVersion = "v1.20.14-k3s1"
gitCommit = "c9ccd1687e39f0ae9ff460dbeed44d8a528d8f47"
gitVersion = "v1.20.15-k3s1"
gitCommit = "5312135011ce191fcf1bd1aa6099f2d3d690f57e"
gitTreeState = "clean"
buildDate = "2021-12-15T21:14:12Z"
buildDate = "2022-01-19T23:16:01Z"
)
......@@ -90,6 +90,9 @@ const (
DefaultDummyDevice = "kube-ipvs0"
connReuseMinSupportedKernelVersion = "4.1"
// https://github.com/torvalds/linux/commit/35dfb013149f74c2be1ff9c78f14e6a3cd1539d1
connReuseFixedKernelVersion = "5.9"
)
// iptablesJumpChain is tables of iptables chains that ipvs proxier used to install iptables or cleanup iptables.
......@@ -387,6 +390,9 @@ func NewProxier(ipt utiliptables.Interface,
}
if kernelVersion.LessThan(version.MustParseGeneric(connReuseMinSupportedKernelVersion)) {
klog.Errorf("can't set sysctl %s, kernel version must be at least %s", sysctlConnReuse, connReuseMinSupportedKernelVersion)
} else if kernelVersion.AtLeast(version.MustParseGeneric(connReuseFixedKernelVersion)) {
// https://github.com/kubernetes/kubernetes/issues/93297
klog.V(2).InfoS("Left as-is", "sysctl", sysctlConnReuse)
} else {
// Set the connection reuse mode
if err := utilproxy.EnsureSysctl(sysctl, sysctlConnReuse, 0); err != nil {
......
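The IPVS proxier now splits kernels into three ranges: below 4.1 the conn_reuse_mode sysctl cannot be set at all, from 4.1 up to (but not including) 5.9 it is set to 0 to work around kubernetes/kubernetes#93297, and from 5.9 onward (the kernel commit referenced above) it is left untouched because the kernel behaves correctly. A sketch of the same three-way gate using the apimachinery version helpers the proxier already relies on; the function name here is made up for illustration:

```go
package sketch

import "k8s.io/apimachinery/pkg/util/version"

const (
	connReuseMinSupportedKernelVersion = "4.1"
	connReuseFixedKernelVersion        = "5.9"
)

// connReuseAction reports what the proxier would do with
// net.ipv4.vs.conn_reuse_mode for a given kernel version string.
func connReuseAction(kernelVersion string) string {
	kv := version.MustParseGeneric(kernelVersion)
	switch {
	case kv.LessThan(version.MustParseGeneric(connReuseMinSupportedKernelVersion)):
		return "cannot set conn_reuse_mode: kernel too old"
	case kv.AtLeast(version.MustParseGeneric(connReuseFixedKernelVersion)):
		return "leave conn_reuse_mode as-is: kernel already has the fix"
	default:
		return "set conn_reuse_mode=0 to work around the kernel bug"
	}
}
```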
......@@ -545,6 +545,7 @@ func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
if err != nil {
klog.Errorf("Error getting label selectors for pod: %v.", up.Name)
continue
}
if util.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) {
podsToMove = append(podsToMove, pInfo)
......
......@@ -38,6 +38,9 @@ func GetNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffini
// PodMatchesTermsNamespaceAndSelector returns true if the given <pod>
// matches the namespace and selector defined by <affinityPod>`s <term>.
func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, namespaces sets.String, selector labels.Selector) bool {
if selector == nil {
return false
}
if !namespaces.Has(pod.Namespace) {
return false
}
......
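The two scheduler hunks close the same hole from both ends: the caller now skips a term whose label selector fails to convert (the added `continue`) instead of falling through with a nil selector, and PodMatchesTermsNamespaceAndSelector additionally treats a nil selector as a non-match rather than dereferencing it. A minimal sketch of the defensive match, with an illustrative wrapper name:

```go
package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// matchesSelector guards the match: a nil selector (e.g. after a failed
// LabelSelectorAsSelector conversion) never matches, instead of panicking.
func matchesSelector(selector labels.Selector, pod *v1.Pod) bool {
	if selector == nil {
		return false
	}
	return selector.Matches(labels.Set(pod.Labels))
}
```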
......@@ -98,6 +98,14 @@ const (
// LabelFailureDomainBetaRegion failure-domain region label
LabelFailureDomainBetaRegion = "failure-domain.beta.kubernetes.io/region"
// LabelNodeExcludeBalancers specifies that the node should not be considered as a target
// for external load-balancers which use nodes as a second hop (e.g. many cloud LBs which only
// understand nodes). For services that use externalTrafficPolicy=Local, this may mean that
// any backends on excluded nodes are not reachable by those external load-balancers.
// Implementations of this exclusion may vary based on provider. This label is honored starting
// in 1.16 when the ServiceNodeExclusion gate is on.
LabelNodeExcludeBalancers = "node.kubernetes.io/exclude-from-external-load-balancers"
)
const (
......@@ -281,6 +289,8 @@ type Cloud struct {
nodeResourceGroups map[string]string
// unmanagedNodes holds a list of nodes not managed by Azure cloud provider.
unmanagedNodes sets.String
// excludeLoadBalancerNodes holds a list of nodes that should be excluded from LoadBalancer.
excludeLoadBalancerNodes sets.String
// nodeInformerSynced is for determining if the informer has synced.
nodeInformerSynced cache.InformerSynced
......@@ -345,6 +355,7 @@ func NewCloudWithoutFeatureGates(configReader io.Reader) (*Cloud, error) {
nodeZones: map[string]sets.String{},
nodeResourceGroups: map[string]string{},
unmanagedNodes: sets.NewString(),
excludeLoadBalancerNodes: sets.NewString(),
routeCIDRs: map[string]string{},
}
......@@ -748,8 +759,8 @@ func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
UpdateFunc: func(prev, obj interface{}) {
prevNode := prev.(*v1.Node)
newNode := obj.(*v1.Node)
if newNode.Labels[LabelFailureDomainBetaZone] ==
prevNode.Labels[LabelFailureDomainBetaZone] {
if newNode.Labels[v1.LabelTopologyZone] ==
prevNode.Labels[v1.LabelTopologyZone] {
return
}
az.updateNodeCaches(prevNode, newNode)
......@@ -801,6 +812,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
managed, ok := prevNode.ObjectMeta.Labels[managedByAzureLabel]
if ok && managed == "false" {
az.unmanagedNodes.Delete(prevNode.ObjectMeta.Name)
az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name)
}
// Remove from excludeLoadBalancerNodes cache.
if _, hasExcludeBalancerLabel := prevNode.ObjectMeta.Labels[LabelNodeExcludeBalancers]; hasExcludeBalancerLabel {
az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name)
}
}
......@@ -824,6 +841,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
managed, ok := newNode.ObjectMeta.Labels[managedByAzureLabel]
if ok && managed == "false" {
az.unmanagedNodes.Insert(newNode.ObjectMeta.Name)
az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name)
}
// Add to excludeLoadBalancerNodes cache.
if _, hasExcludeBalancerLabel := newNode.ObjectMeta.Labels[LabelNodeExcludeBalancers]; hasExcludeBalancerLabel {
az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name)
}
}
}
......@@ -913,16 +936,23 @@ func (az *Cloud) GetUnmanagedNodes() (sets.String, error) {
return sets.NewString(az.unmanagedNodes.List()...), nil
}
// ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged or in external resource group.
func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(node *v1.Node) bool {
labels := node.ObjectMeta.Labels
if rg, ok := labels[externalResourceGroupLabel]; ok && !strings.EqualFold(rg, az.ResourceGroup) {
return true
// ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged, in external resource group or labeled with "node.kubernetes.io/exclude-from-external-load-balancers".
func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(nodeName string) (bool, error) {
// Kubelet won't set az.nodeInformerSynced, always return nil.
if az.nodeInformerSynced == nil {
return false, nil
}
if managed, ok := labels[managedByAzureLabel]; ok && managed == "false" {
return true
az.nodeCachesLock.RLock()
defer az.nodeCachesLock.RUnlock()
if !az.nodeInformerSynced() {
return false, fmt.Errorf("node informer is not synced when trying to fetch node caches")
}
// Return true if the node is in external resource group.
if cachedRG, ok := az.nodeResourceGroups[nodeName]; ok && !strings.EqualFold(cachedRG, az.ResourceGroup) {
return true, nil
}
return false
return az.excludeLoadBalancerNodes.Has(nodeName), nil
}
......@@ -69,6 +69,7 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) {
nodeInformerSynced: func() bool { return true },
nodeResourceGroups: map[string]string{},
unmanagedNodes: sets.NewString(),
excludeLoadBalancerNodes: sets.NewString(),
routeCIDRs: map[string]string{},
eventRecorder: &record.FakeRecorder{},
}
......
......@@ -343,7 +343,7 @@ func (az *Cloud) cleanBackendpoolForPrimarySLB(primarySLB *network.LoadBalancer,
},
}
// decouple the backendPool from the node
err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted)
err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, true)
if err != nil {
return nil, err
}
......@@ -1083,15 +1083,6 @@ func (az *Cloud) findFrontendIPConfigOfService(
return nil, false, nil
}
func nodeNameInNodes(nodeName string, nodes []*v1.Node) bool {
for _, node := range nodes {
if strings.EqualFold(nodeName, node.Name) {
return true
}
}
return false
}
// reconcileLoadBalancer ensures load balancer exists and the frontend ip config is setup.
// This also reconciles the Service's Ports with the LoadBalancer config.
// This entails adding rules/probes for expected Ports and removing stale rules/ports.
......@@ -1147,7 +1138,12 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
// would not be in the `nodes` slice. We need to check the nodes that
// have been added to the LB's backendpool, find the unwanted ones and
// delete them from the pool.
if !nodeNameInNodes(nodeName, nodes) {
shouldExcludeLoadBalancer, err := az.ShouldNodeExcludedFromLoadBalancer(nodeName)
if err != nil {
klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", nodeName, err)
return nil, err
}
if shouldExcludeLoadBalancer {
klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found unwanted node %s, decouple it from the LB", serviceName, wantLb, nodeName)
// construct a backendPool that only contains the IP config of the node to be deleted
backendIPConfigurationsToBeDeleted = append(backendIPConfigurationsToBeDeleted, network.InterfaceIPConfiguration{ID: to.StringPtr(ipConfID)})
......@@ -1164,7 +1160,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
}
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
// decouple the backendPool from the node
err = az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted)
err = az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, false)
if err != nil {
return nil, err
}
......@@ -1448,14 +1444,28 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
// do nothing for availability set
lb.BackendAddressPools = nil
}
err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools)
err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools, true)
if err != nil {
klog.Errorf("EnsureBackendPoolDeleted(%s) for service %s failed: %v", lbBackendPoolID, serviceName, err)
return nil, err
}
klog.V(10).Infof("EnsureBackendPoolDeleted(%s) for service %s: end", lbBackendPoolID, serviceName)
// Remove the LB.
existingLBs, err := az.ListLB(service)
if err != nil {
klog.Errorf("reconcileLoadBalancer: failed to list load balancer for service %q: %s", serviceName, err.Error())
return nil, err
}
foundLB := false
for _, existingLB := range existingLBs {
if strings.EqualFold(lbName, to.String(existingLB.Name)) {
foundLB = true
break
}
}
// Remove the LB if it exists.
if foundLB {
klog.V(10).Infof("reconcileLoadBalancer: az.DeleteLB(%q): start", lbName)
err = az.DeleteLB(service, lbName)
if err != nil {
......@@ -1464,6 +1474,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
}
klog.V(10).Infof("az.DeleteLB(%q): end", lbName)
}
}
} else {
klog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName)
err := az.CreateOrUpdateLB(service, *lb)
......@@ -1665,7 +1676,8 @@ func (az *Cloud) reconcileLoadBalancerRule(
if probeProtocol == "" {
probeProtocol = string(network.ProbeProtocolHTTP)
}
if requestPath == "" {
needRequestPath := strings.EqualFold(probeProtocol, string(network.ProbeProtocolHTTP)) || strings.EqualFold(probeProtocol, string(network.ProbeProtocolHTTPS))
if requestPath == "" && needRequestPath {
requestPath = podPresencePath
}
......
......@@ -914,7 +914,12 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No
continue
}
if as.ShouldNodeExcludedFromLoadBalancer(node) {
shouldExcludeLoadBalancer, err := as.ShouldNodeExcludedFromLoadBalancer(localNodeName)
if err != nil {
klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", localNodeName, err)
return err
}
if shouldExcludeLoadBalancer {
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
continue
}
......@@ -939,7 +944,7 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No
}
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
// Returns nil if backend address pools already deleted.
if backendAddressPools == nil {
return nil
......
......@@ -64,7 +64,7 @@ type VMSet interface {
// participating in the specified LoadBalancer Backend Pool.
EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) (string, string, string, *compute.VirtualMachineScaleSetVM, error)
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error
EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error
// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string, writeAcceleratorEnabled bool) error
......
......@@ -743,11 +743,16 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
continue
}
if ss.ShouldNodeExcludedFromLoadBalancer(nodes[nx]) {
nodeName := nodes[nx].Name
shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(nodeName)
if err != nil {
klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", nodeName, err)
return nil, err
}
if shouldExcludeLoadBalancer {
continue
}
nodeName := nodes[nx].Name
ssName, _, _, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault)
if err != nil {
return nil, err
......@@ -1105,7 +1110,12 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
continue
}
if ss.ShouldNodeExcludedFromLoadBalancer(node) {
shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(node.Name)
if err != nil {
klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", node.Name, err)
return err
}
if shouldExcludeLoadBalancer {
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", node.Name)
continue
}
......@@ -1248,7 +1258,12 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
continue
}
if ss.ShouldNodeExcludedFromLoadBalancer(node) {
shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(localNodeName)
if err != nil {
klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", localNodeName, err)
return err
}
if shouldExcludeLoadBalancer {
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
continue
}
......@@ -1569,7 +1584,7 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
}
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
// Returns nil if backend address pools already deleted.
if backendAddressPools == nil {
return nil
......@@ -1681,10 +1696,12 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,
}
// Ensure the backendPoolID is also deleted on VMSS itself.
if deleteFromVMSet {
err := ss.ensureBackendPoolDeletedFromVMSS(service, backendPoolID, vmSetName, ipConfigurationIDs)
if err != nil {
return err
}
}
isOperationSucceeded = true
return nil
......