Unverified Commit da168695 authored by Hussein Galal's avatar Hussein Galal Committed by GitHub

Update to v1.19.16 (#4352)

* Update to v1.19.16 Signed-off-by: 's avatargalal-hussein <hussein.galal.ahmed.11@gmail.com> * Update to v1.19.16 Signed-off-by: 's avatargalal-hussein <hussein.galal.ahmed.11@gmail.com>
parent 973084ce
......@@ -36,31 +36,31 @@ replace (
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
google.golang.org/grpc => google.golang.org/grpc v1.27.1
gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.19.15-k3s1
k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.15-k3s1
k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.19.15-k3s1
k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.19.15-k3s1
k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.19.15-k3s1
k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.19.15-k3s1
k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.19.15-k3s1
k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.15-k3s1
k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.19.15-k3s1
k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.19.15-k3s1
k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.19.15-k3s1
k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.15-k3s1
k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.15-k3s1
k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.15-k3s1
k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.19.15-k3s1
k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.15-k3s1
k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.19.15-k3s1
k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.19.15-k3s1
k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.19.15-k3s1
k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.15-k3s1
k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.19.15-k3s1
k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.19.15-k3s1
k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.15-k3s1
k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.19.15-k3s1
k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.19.15-k3s1
k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.19.16-k3s1
k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.16-k3s1
k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.19.16-k3s1
k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.19.16-k3s1
k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.19.16-k3s1
k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.19.16-k3s1
k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.19.16-k3s1
k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.16-k3s1
k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.19.16-k3s1
k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.19.16-k3s1
k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.19.16-k3s1
k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.16-k3s1
k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.16-k3s1
k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.16-k3s1
k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.19.16-k3s1
k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.16-k3s1
k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.19.16-k3s1
k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.19.16-k3s1
k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.19.16-k3s1
k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.16-k3s1
k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.19.16-k3s1
k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.19.16-k3s1
k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.16-k3s1
k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.19.16-k3s1
k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.19.16-k3s1
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
)
......@@ -113,15 +113,15 @@ require (
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887
google.golang.org/grpc v1.33.2
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.19.15
k8s.io/apimachinery v0.19.15
k8s.io/apiserver v0.19.15
k8s.io/api v0.19.16
k8s.io/apimachinery v0.19.16
k8s.io/apiserver v0.19.16
k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
k8s.io/cloud-provider v0.19.15
k8s.io/component-base v0.19.15
k8s.io/cloud-provider v0.19.16
k8s.io/component-base v0.19.16
k8s.io/cri-api v0.20.1
k8s.io/klog v1.0.0
k8s.io/kubernetes v1.19.15
k8s.io/kubernetes v1.19.16
k8s.io/utils v0.0.0-20200729134348-d5654de09c73
sigs.k8s.io/yaml v1.2.0
)
......@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "19"
gitVersion = "v1.19.15-k3s1"
gitCommit = "a14bf227db31db89a172864ca2a934ff1c637eef"
gitVersion = "v1.19.16-k3s1"
gitCommit = "0c01d3bc4234649f6d071c70081997779f974d9c"
gitTreeState = "clean"
buildDate = "2021-09-16T15:13:53Z"
buildDate = "2021-10-28T17:29:36Z"
)
......@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "19"
gitVersion = "v1.19.15-k3s1"
gitCommit = "a14bf227db31db89a172864ca2a934ff1c637eef"
gitVersion = "v1.19.16-k3s1"
gitCommit = "0c01d3bc4234649f6d071c70081997779f974d9c"
gitTreeState = "clean"
buildDate = "2021-09-16T15:13:53Z"
buildDate = "2021-10-28T17:29:36Z"
)
......@@ -138,9 +138,6 @@ func parseMaxSize(size string) (int64, error) {
if !ok {
return 0, fmt.Errorf("invalid max log size")
}
if maxSize < 0 {
return 0, fmt.Errorf("negative max log size %d", maxSize)
}
return maxSize, nil
}
......@@ -161,6 +158,10 @@ func NewContainerLogManager(runtimeService internalapi.RuntimeService, osInterfa
if err != nil {
return nil, fmt.Errorf("failed to parse container log max size %q: %v", maxSize, err)
}
// Negative number means to disable container log rotation
if parsedMaxSize < 0 {
return NewStubContainerLogManager(), nil
}
// policy LogRotatePolicy
return &containerLogManager{
osInterface: osInterface,
......
......@@ -80,16 +80,12 @@ func NewBalancedAllocation(_ runtime.Object, h framework.FrameworkHandle) (frame
// todo: use resource weights in the scorer function
func balancedResourceScorer(requested, allocable resourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
// This to find a node which has most balanced CPU, memory and volume usage.
cpuFraction := fractionOfCapacity(requested[v1.ResourceCPU], allocable[v1.ResourceCPU])
memoryFraction := fractionOfCapacity(requested[v1.ResourceMemory], allocable[v1.ResourceMemory])
// fractions might be greater than 1 because pods with no requests get minimum
// values.
if cpuFraction > 1 {
cpuFraction = 1
}
if memoryFraction > 1 {
memoryFraction = 1
// This to find a node which has most balanced CPU, memory and volume usage.
if cpuFraction >= 1 || memoryFraction >= 1 {
// if requested >= capacity, the corresponding host should never be preferred.
return 0
}
if includeVolumes && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && allocatableVolumes > 0 {
......
......@@ -275,7 +275,6 @@ func (plugin *vsphereVolumePlugin) NewDeviceUnmounter() (volume.DeviceUnmounter,
// Detach the given device from the given node.
func (detacher *vsphereVMDKDetacher) Detach(volumeName string, nodeName types.NodeName) error {
volPath := getVolPathfromVolumeName(volumeName)
attached, newVolumePath, err := detacher.vsphereVolumes.DiskIsAttached(volPath, nodeName)
if err != nil {
......
......@@ -18,6 +18,7 @@ go_library(
"vsphere_util_linux.go",
"vsphere_util_unsupported.go",
"vsphere_util_windows.go",
"vsphere_volume_map.go",
],
importmap = "k8s.io/kubernetes/vendor/k8s.io/legacy-cloud-providers/vsphere",
importpath = "k8s.io/legacy-cloud-providers/vsphere",
......@@ -32,6 +33,7 @@ go_library(
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/cloud-provider/node/helpers:go_default_library",
"//staging/src/k8s.io/cloud-provider/volume/errors:go_default_library",
"//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
"//staging/src/k8s.io/legacy-cloud-providers/vsphere/vclib:go_default_library",
"//staging/src/k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers:go_default_library",
......@@ -55,6 +57,7 @@ go_test(
"credentialmanager_test.go",
"vsphere_test.go",
"vsphere_util_test.go",
"vsphere_volume_map_test.go",
],
embed = [":go_default_library"],
deps = [
......@@ -71,6 +74,7 @@ go_test(
"//vendor/github.com/vmware/govmomi:go_default_library",
"//vendor/github.com/vmware/govmomi/find:go_default_library",
"//vendor/github.com/vmware/govmomi/lookup/simulator:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
"//vendor/github.com/vmware/govmomi/property:go_default_library",
"//vendor/github.com/vmware/govmomi/simulator:go_default_library",
"//vendor/github.com/vmware/govmomi/simulator/vpx:go_default_library",
......
......@@ -311,6 +311,17 @@ func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) {
return nodeDetails, nil
}
// GetNodeNames returns list of nodes that are known to vsphere cloudprovider.
// These are typically nodes that make up k8s cluster.
func (nm *NodeManager) GetNodeNames() []k8stypes.NodeName {
	nodes := nm.getNodes()
	// Pre-size the slice: one name per known node, avoiding append growth.
	nodeNameList := make([]k8stypes.NodeName, 0, len(nodes))
	for _, node := range nodes {
		nodeNameList = append(nodeNameList, k8stypes.NodeName(node.Name))
	}
	return nodeNameList
}
func (nm *NodeManager) refreshNodes() (errList []error) {
for nodeName := range nm.getNodes() {
nodeInfo, err := nm.getRefreshedNodeInfo(convertToK8sType(nodeName))
......
......@@ -24,7 +24,6 @@ import (
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/klog/v2"
......@@ -181,14 +180,6 @@ func IsInvalidCredentialsError(err error) bool {
return isInvalidCredentialsError
}
// VerifyVolumePathsForVM verifies if the volume paths (volPaths) are attached to VM.
func VerifyVolumePathsForVM(vmMo mo.VirtualMachine, volPaths []string, nodeName string, nodeVolumeMap map[string]map[string]bool) {
// Verify if the volume paths are present on the VM backing virtual disk devices
vmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device)
VerifyVolumePathsForVMDevices(vmDevices, volPaths, nodeName, nodeVolumeMap)
}
// VerifyVolumePathsForVMDevices verifies if the volume paths (volPaths) are attached to VM.
func VerifyVolumePathsForVMDevices(vmDevices object.VirtualDeviceList, volPaths []string, nodeName string, nodeVolumeMap map[string]map[string]bool) {
volPathsMap := make(map[string]bool)
......
......@@ -23,6 +23,7 @@ import (
"errors"
"fmt"
"io"
"net"
"net/url"
"os"
......@@ -48,6 +49,7 @@ import (
"k8s.io/client-go/tools/cache"
cloudprovider "k8s.io/cloud-provider"
nodehelpers "k8s.io/cloud-provider/node/helpers"
volerr "k8s.io/cloud-provider/volume/errors"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
"k8s.io/klog/v2"
......@@ -95,6 +97,7 @@ type VSphere struct {
hostName string
// Maps the VSphere IP address to VSphereInstance
vsphereInstanceMap map[string]*VSphereInstance
vsphereVolumeMap *VsphereVolumeMap
// Responsible for managing discovery of k8s node, their location etc.
nodeManager *NodeManager
vmUUID string
......@@ -542,6 +545,7 @@ func buildVSphereFromConfig(cfg VSphereConfig) (*VSphere, error) {
nodeInfoMap: make(map[string]*NodeInfo),
registeredNodes: make(map[string]*v1.Node),
},
vsphereVolumeMap: NewVsphereVolumeMap(),
isSecretInfoProvided: isSecretInfoProvided,
isSecretManaged: !cfg.Global.SecretNotManaged,
cfg: &cfg,
......@@ -950,6 +954,20 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN
}
}
klog.V(4).Infof("AttachDisk executed for node %s and volume %s with diskUUID %s. Err: %s", convertToString(nodeName), vmDiskPath, diskUUID, err)
if err != nil {
// if attach failed, we should check if disk is attached somewhere else. This can happen for several reasons
// and throwing a dangling volume error here will allow attach-detach controller to detach disk from a node
// where it is not needed.
existingNode, ok := vs.vsphereVolumeMap.CheckForVolume(vmDiskPath)
if ok {
attached, newVolumePath, diskAttachedError := vs.DiskIsAttached(vmDiskPath, existingNode)
// if disk is attached somewhere else then we can throw a dangling error
if diskAttachedError == nil && attached && (nodeName != existingNode) {
klog.V(3).Infof("found dangling volume %s to node %s", vmDiskPath, existingNode)
return "", volerr.NewDanglingError(err.Error(), existingNode, newVolumePath)
}
}
}
vclib.RecordvSphereMetric(vclib.OperationAttachVolume, requestTime, err)
return diskUUID, err
}
......@@ -1083,6 +1101,7 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b
// 5b. If VMs are removed from vSphere inventory they are ignored.
func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) {
disksAreAttachedInternal := func(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) {
vs.vsphereVolumeMap.StartDiskVerification()
// disksAreAttach checks whether disks are attached to the nodes.
// Returns nodes that need to be retried if retry is true
......@@ -1194,8 +1213,17 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
for nodeName, volPaths := range attached {
disksAttached[convertToK8sType(nodeName)] = volPaths
}
}
klog.V(4).Infof("DisksAreAttach successfully executed. result: %+v", attached)
klog.V(4).Infof("DisksAreAttached successfully executed. result: %+v", attached)
// There could be nodes in cluster which do not have any pods with vsphere volumes running on them
// such nodes won't be part of nodeVolumes map because attach-detach controller does not keep track
// such nodes. But such nodes may still have dangling volumes on them and hence we need to scan all the
// remaining nodes which weren't scanned by code previously.
vs.BuildMissingVolumeNodeMap(ctx)
// any volume which we could not verify will be removed from the map.
vs.vsphereVolumeMap.RemoveUnverified()
klog.V(4).Infof("current node volume map is: %+v", vs.vsphereVolumeMap.volumeNodeMap)
return disksAttached, nil
}
requestTime := time.Now()
......
......@@ -26,6 +26,7 @@ import (
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/vmware/govmomi/find"
......@@ -569,6 +570,7 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN
return nodesToRetry, err
}
klog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm)
vs.vsphereVolumeMap.Add(nodeName, devices)
vclib.VerifyVolumePathsForVMDevices(devices, nodeVolumes[nodeName], convertToString(nodeName), attached)
}
}
......@@ -599,11 +601,131 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN
}
nodeUUID = strings.ToLower(nodeUUID)
klog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %v", nodeName, nodeUUID, vmMoMap)
vclib.VerifyVolumePathsForVM(vmMoMap[nodeUUID], nodeVolumes[nodeName], convertToString(nodeName), attached)
vmMo := vmMoMap[nodeUUID]
vmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device)
vs.vsphereVolumeMap.Add(nodeName, vmDevices)
vclib.VerifyVolumePathsForVMDevices(vmDevices, nodeVolumes[nodeName], convertToString(nodeName), attached)
}
return nodesToRetry, nil
}
// BuildMissingVolumeNodeMap scans nodes that the regular disk-verification
// pass did not cover and records their attached volumes in vsphereVolumeMap.
// Nodes without vsphere-volume pods are unknown to the attach-detach
// controller and so are absent from its node/volume map, yet they may still
// hold dangling volumes; this fills in that gap.
func (vs *VSphere) BuildMissingVolumeNodeMap(ctx context.Context) {
	// Group unverified nodes by vCenter+datacenter so each group can be
	// checked concurrently against its own VC endpoint.
	dcNodes := make(map[string][]k8stypes.NodeName)
	for _, nodeName := range vs.nodeManager.GetNodeNames() {
		if vs.vsphereVolumeMap.CheckForNode(nodeName) {
			// Already processed by the usual verification path; skip.
			continue
		}
		nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName)
		if err != nil {
			klog.V(4).Infof("Failed to get node info: %+v. err: %+v", nodeInfo.vm, err)
			continue
		}
		key := nodeInfo.vcServer + nodeInfo.dataCenter.String()
		dcNodes[key] = append(dcNodes[key], nodeName)
	}
	var wg sync.WaitGroup
	for _, group := range dcNodes {
		wg.Add(1)
		// One goroutine per VC-DC; the node slice is passed as an argument
		// so the goroutine does not capture the loop variable.
		go func(nodes []k8stypes.NodeName) {
			defer wg.Done()
			if err := vs.checkNodeDisks(ctx, nodes); err != nil {
				klog.Errorf("Failed to check disk attached for nodes: %+v. err: %+v", nodes, err)
			}
		}(group)
	}
	wg.Wait()
}
// checkNodeDisks inspects the virtual disk devices of the given nodes (all
// expected to reside on the same vCenter/datacenter) and records every
// attached volume in vs.vsphereVolumeMap. It is invoked from
// BuildMissingVolumeNodeMap for nodes the regular verification pass skipped.
// Returns the first error encountered while resolving nodes or querying VMs.
func (vs *VSphere) checkNodeDisks(ctx context.Context, nodeNames []k8stypes.NodeName) error {
	var vmList []*vclib.VirtualMachine
	var nodeInfo NodeInfo
	var err error
	for _, nodeName := range nodeNames {
		nodeInfo, err = vs.nodeManager.GetNodeInfo(nodeName)
		if err != nil {
			return err
		}
		vmList = append(vmList, nodeInfo.vm)
	}
	// Guard: with no nodes, nodeInfo below would be the zero value and the
	// server/datacenter lookups would operate on empty fields.
	if len(vmList) == 0 {
		return nil
	}
	// Making sure session is valid
	_, err = vs.getVSphereInstanceForServer(nodeInfo.vcServer, ctx)
	if err != nil {
		return err
	}
	// If any of the nodes are not present property collector query will fail for entire operation
	vmMoList, err := nodeInfo.dataCenter.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name", "config.uuid"})
	if err != nil {
		if vclib.IsManagedObjectNotFoundError(err) {
			klog.V(4).Infof("checkNodeDisks: ManagedObjectNotFound for property collector query for nodes: %+v vms: %+v", nodeNames, vmList)
			// Property Collector Query failed
			// VerifyVolumePaths per VM
			for _, nodeName := range nodeNames {
				nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName)
				if err != nil {
					return err
				}
				devices, err := nodeInfo.vm.VirtualMachine.Device(ctx)
				if err != nil {
					if vclib.IsManagedObjectNotFoundError(err) {
						klog.V(4).Infof("checkNodeDisks: ManagedObjectNotFound for Kubernetes node: %s with vSphere Virtual Machine reference: %v", nodeName, nodeInfo.vm)
						continue
					}
					return err
				}
				klog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm)
				vs.vsphereVolumeMap.Add(nodeName, devices)
			}
			return nil
		}
		return err
	}
	// Index VM managed objects by lowercase UUID; VMs with nil Config are
	// intentionally excluded here, so lookups below may miss.
	vmMoMap := make(map[string]mo.VirtualMachine)
	for _, vmMo := range vmMoList {
		if vmMo.Config == nil {
			klog.Errorf("Config is not available for VM: %q", vmMo.Name)
			continue
		}
		klog.V(9).Infof("vmMoMap vmname: %q vmuuid: %s", vmMo.Name, strings.ToLower(vmMo.Config.Uuid))
		vmMoMap[strings.ToLower(vmMo.Config.Uuid)] = vmMo
	}
	klog.V(9).Infof("vmMoMap: +%v", vmMoMap)
	for _, nodeName := range nodeNames {
		node, err := vs.nodeManager.GetNode(nodeName)
		if err != nil {
			return err
		}
		nodeUUID, err := GetNodeUUID(&node)
		if err != nil {
			klog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err)
			return err
		}
		nodeUUID = strings.ToLower(nodeUUID)
		klog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %v", nodeName, nodeUUID, vmMoMap)
		vmMo, ok := vmMoMap[nodeUUID]
		if !ok || vmMo.Config == nil {
			// The UUID was absent (e.g. its VM was skipped above for a nil
			// Config). Previously the zero-valued mo.VirtualMachine would be
			// dereferenced via vmMo.Config.Hardware.Device and panic.
			klog.V(4).Infof("checkNodeDisks: no usable VM found for node %s with nodeuuid %q", nodeName, nodeUUID)
			continue
		}
		vmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device)
		vs.vsphereVolumeMap.Add(nodeName, vmDevices)
	}
	return nil
}
func (vs *VSphere) GetNodeNameFromProviderID(providerID string) (string, error) {
var nodeName string
nodes, err := vs.nodeManager.GetNodeDetails()
......
// +build !providerless
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"sync"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types"
k8stypes "k8s.io/apimachinery/pkg/types"
)
// volumePath is the vmdk path of a vsphere volume, used as the map key below.
type volumePath string
// nodeVolumeStatus records which node a volume was last seen attached to and
// whether that attachment has been confirmed in the current verification pass.
type nodeVolumeStatus struct {
nodeName k8stypes.NodeName
verified bool
}
// VsphereVolumeMap stores last known state of node and volume mapping
type VsphereVolumeMap struct {
// volumeNodeMap maps each known volume path to its last observed node/status.
volumeNodeMap map[volumePath]nodeVolumeStatus
// nodeMap tracks nodes already processed by the verification mechanism.
nodeMap map[k8stypes.NodeName]bool
// lock guards both maps; reads take RLock, mutations take Lock.
lock sync.RWMutex
}
// NewVsphereVolumeMap returns an empty, ready-to-use VsphereVolumeMap.
func NewVsphereVolumeMap() *VsphereVolumeMap {
	m := &VsphereVolumeMap{}
	m.volumeNodeMap = make(map[volumePath]nodeVolumeStatus)
	m.nodeMap = make(map[k8stypes.NodeName]bool)
	return m
}
// StartDiskVerification begins a verification cycle: every known volume is
// flagged unverified so that entries not re-confirmed by the end of the cycle
// can be pruned, and the processed-node set is cleared so every node is
// eligible for verification again.
func (vsphereVolume *VsphereVolumeMap) StartDiskVerification() {
	vsphereVolume.lock.Lock()
	defer vsphereVolume.lock.Unlock()
	for path, status := range vsphereVolume.volumeNodeMap {
		status.verified = false
		vsphereVolume.volumeNodeMap[path] = status
	}
	// Drop all processed-node records so nodes missed by the usual
	// verification process can still be (re)verified.
	vsphereVolume.nodeMap = make(map[k8stypes.NodeName]bool)
}
// CheckForVolume reports whether the given volume path was last seen attached
// to some node in the cluster, returning that node's name when found. The
// answer reflects cached state only and should be confirmed by a separate
// verification step.
func (vsphereVolume *VsphereVolumeMap) CheckForVolume(path string) (k8stypes.NodeName, bool) {
	vsphereVolume.lock.RLock()
	defer vsphereVolume.lock.RUnlock()
	if status, found := vsphereVolume.volumeNodeMap[volumePath(path)]; found {
		return status.nodeName, true
	}
	return "", false
}
// CheckForNode reports whether the given node was already handled by the
// volume verification mechanism, letting callers skip re-verifying its disks.
func (vsphereVolume *VsphereVolumeMap) CheckForNode(nodeName k8stypes.NodeName) bool {
	vsphereVolume.lock.RLock()
	defer vsphereVolume.lock.RUnlock()
	_, present := vsphereVolume.nodeMap[nodeName]
	return present
}
// Add records every flat-backed virtual disk in vmDevices as attached (and
// verified) on the given node, and marks the node itself as processed.
func (vsphereVolume *VsphereVolumeMap) Add(node k8stypes.NodeName, vmDevices object.VirtualDeviceList) {
	vsphereVolume.lock.Lock()
	defer vsphereVolume.lock.Unlock()
	for _, device := range vmDevices {
		if vmDevices.TypeName(device) != "VirtualDisk" {
			continue
		}
		// Only disks with a flat VMDK backing carry a file name we can map.
		backing, ok := device.GetVirtualDevice().Backing.(*types.VirtualDiskFlatVer2BackingInfo)
		if !ok {
			continue
		}
		vsphereVolume.volumeNodeMap[volumePath(backing.FileName)] = nodeVolumeStatus{nodeName: node, verified: true}
		vsphereVolume.nodeMap[node] = true
	}
}
// RemoveUnverified prunes every volume entry that was not re-confirmed since
// StartDiskVerification, along with that entry's node from the processed-node
// set (so the node will be scanned again on the next cycle).
func (vsphereVolume *VsphereVolumeMap) RemoveUnverified() {
	vsphereVolume.lock.Lock()
	defer vsphereVolume.lock.Unlock()
	for path, status := range vsphereVolume.volumeNodeMap {
		if status.verified {
			continue
		}
		delete(vsphereVolume.volumeNodeMap, path)
		delete(vsphereVolume.nodeMap, status.nodeName)
	}
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment