def scale(config, plandir, cluster, overrides):
    plan = cluster
    data = {'cluster': cluster, 'kube': cluster, 'kubetype': 'k3s'}
    data['basedir'] = '/workdir' if container_mode() else '.'
    cluster = data.get('cluster')
    clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
    if os.path.exists("%s/kcli_parameters.yml" % clusterdir):
        with open("%s/kcli_parameters.yml" % clusterdir, 'r') as install:
            installparam = yaml.safe_load(install)
            data.update(installparam)
            plan = installparam.get('plan', plan)
    data.update(overrides)
    with open("%s/kcli_parameters.yml" % clusterdir, 'w') as paramfile:
        yaml.safe_dump(data, paramfile)
    client = config.client
    k = config.k
    pprint("Scaling on client %s" % client)
    image = k.info("%s-master-0" % cluster).get('image')
    if image is None:
        error("Missing image...")
        sys.exit(1)
    else:
        pprint("Using image %s" % image)
    data['image'] = image
    os.chdir(os.path.expanduser("~/.kcli"))
    for role in ['masters', 'workers']:
        overrides = data.copy()
        threaded = data.get('threaded', False) or data.get(f'{role}_threaded', False)
        if role == 'masters' and overrides.get('masters', 1) == 1:
            continue
        config.plan(plan, inputfile='%s/%s.yml' % (plandir, role), overrides=overrides, threaded=threaded)

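# A minimal usage sketch (hypothetical plandir path, not from the original source):
# scale the workers of an existing k3s cluster 'myk3s' to 3 nodes, reusing the
# parameters stored under ~/.kcli/clusters/myk3s.
config = Kconfig()
scale(config, plandir='/path/to/kvirt/cluster/k3s', cluster='myk3s', overrides={'workers': 3})
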
def scale(config, plandir, cluster, overrides):
    plan = cluster
    data = {'cluster': cluster, 'xip': False, 'kube': cluster, 'kubetype': 'generic'}
    data['basedir'] = '/workdir' if os.path.exists("/i_am_a_container") else '.'
    cluster = data.get('cluster')
    clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
    if os.path.exists("%s/kcli_parameters.yml" % clusterdir):
        with open("%s/kcli_parameters.yml" % clusterdir, 'r') as install:
            installparam = yaml.safe_load(install)
            data.update(installparam)
            plan = installparam.get('plan', plan)
    data.update(overrides)
    client = config.client
    k = config.k
    pprint("Scaling on client %s" % client)
    image = k.info("%s-master-0" % cluster).get('image')
    if image is None:
        error("Missing image...")
        sys.exit(1)
    else:
        pprint("Using image %s" % image)
    data['image'] = image
    data['ubuntu'] = 'ubuntu' in image.lower() or any(entry in image for entry in UBUNTUS)
    os.chdir(os.path.expanduser("~/.kcli"))
    for role in ['masters', 'workers']:
        overrides = data.copy()
        if overrides.get(role, 0) == 0:
            continue
        config.plan(plan, inputfile='%s/%s.yml' % (plandir, role), overrides=overrides)

def waitForMe(t):
    while t.info.state not in [vim.TaskInfo.State.success, vim.TaskInfo.State.error]:
        time.sleep(1)
    if t.info.state == vim.TaskInfo.State.error:
        error(t.info.description)
        error(t.info.error)
        os._exit(1)

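# A minimal usage sketch (not from the original source): waitForMe blocks on a
# pyVmomi task until it reaches a terminal state. Assumes an already retrieved
# vim.VirtualMachine object named vm, e.g. one obtained through findvm().
task = vm.PowerOnVM_Task()  # any vSphere method returning a vim.Task works
waitForMe(task)             # returns once the task succeeds, exits the process on error
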
def add_disk(self, name, size, pool=None, thin=True, image=None, shareable=False, existing=None,
             interface='virtio', novm=False, overrides={}):
    glance = self.glance
    cinder = self.cinder
    nova = self.nova
    try:
        vm = nova.servers.find(name=name)
    except:
        error("VM %s not found" % name)
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    glanceimage = None  # leave imageRef empty when no backing image is requested
    if image is not None:
        glanceimages = [img for img in glance.images.list() if img.name == image]
        if glanceimages:
            glanceimage = glanceimages[0]
        else:
            msg = "you don't have image %s" % image
            return {'result': 'failure', 'reason': msg}
    volume = cinder.volumes.create(name=name, size=size, imageRef=glanceimage)
    cinder.volumes.attach(volume, vm.id, '/dev/vdi', mode='rw')
    return {'result': 'success'}

def delete(self, name, snapshots=False):
    cinder = self.cinder
    nova = self.nova
    try:
        vm = nova.servers.find(name=name)
    except:
        error("VM %s not found" % name)
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    floating_ips = {f['floating_ip_address']: f['id'] for f in self.neutron.list_floatingips()['floatingips']}
    vm_floating_ips = []
    for key in list(vm.addresses):
        entry1 = vm.addresses[key]
        for entry2 in entry1:
            if entry2['OS-EXT-IPS:type'] == 'floating':
                vm_floating_ips.append(entry2['addr'])
    vm.delete()
    for floating in vm_floating_ips:
        floatingid = floating_ips[floating]
        try:
            self.neutron.delete_floatingip(floatingid)
        except Exception as e:
            error("Hit %s when trying to delete floating %s" % (str(e), floating))
    for disk in vm._info['os-extended-volumes:volumes_attached']:
        volume = cinder.volumes.get(disk['id'])
        for attachment in volume.attachments:
            if attachment['server_id'] == vm.id:
                cinder.volumes.detach(volume, attachment['attachment_id'])
        cinder.volumes.delete(disk['id'])
    return {'result': 'success'}

def update_cpus(self, name, numcpus):
    conn = self.conn
    try:
        Filters = {'Name': "tag:Name", 'Values': [name]}
        vm = conn.describe_instances(Filters=[Filters])['Reservations'][0]['Instances'][0]
    except:
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    instanceid = vm['InstanceId']
    state = vm['State']['Name']
    if state != 'stopped':
        error("Can't update cpus of VM %s while up" % name)
        return {'result': 'failure', 'reason': "VM %s up" % name}
    instancetype = [f for f in staticf if staticf[f]['cpus'] >= numcpus]
    if instancetype:
        flavor = instancetype[0]
        pprint("Using flavor %s" % flavor)
        conn.modify_instance_attribute(InstanceId=instanceid, Attribute='instanceType', Value=flavor,
                                       DryRun=False)
        return {'result': 'success'}
    else:
        error("Couldn't find matching flavor for this number of cpus")
        return {'result': 'failure', 'reason': "Couldn't find matching flavor for this number of cpus"}

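# A sketch of the assumed shape of staticf (hypothetical subset, not from the
# original source): a module-level mapping from EC2 instance type to its specs,
# ordered smallest first, so the first match above is the smallest flavor with
# enough cpus.
staticf = {'t2.micro': {'cpus': 1}, 't2.medium': {'cpus': 2}, 't2.xlarge': {'cpus': 4}}
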
def scale(config, plandir, cluster, overrides):
    plan = cluster
    data = {'cluster': cluster, 'kube': cluster, 'kubetype': 'k3s'}
    data['basedir'] = '/workdir' if os.path.exists("/i_am_a_container") else '.'
    cluster = data.get('cluster')
    clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
    if not os.path.exists(clusterdir):
        error("Cluster directory %s not found..." % clusterdir)
        sys.exit(1)
    if os.path.exists("%s/kcli_parameters.yml" % clusterdir):
        with open("%s/kcli_parameters.yml" % clusterdir, 'r') as install:
            installparam = yaml.safe_load(install)
            data.update(installparam)
            plan = installparam.get('plan', plan)
    data.update(overrides)
    client = config.client
    k = config.k
    pprint("Scaling on client %s" % client)
    image = k.info("%s-master-0" % cluster).get('image')
    if image is None:
        error("Missing image...")
        sys.exit(1)
    else:
        pprint("Using image %s" % image)
    data['image'] = image
    os.chdir(os.path.expanduser("~/.kcli"))
    # config.plan(plan, inputfile='%s/workers.yml' % plandir, overrides=data)
    for role in ['masters', 'workers']:
        overrides = data.copy()
        # if overrides.get(role, 0) == 0:
        #     continue
        config.plan(plan, inputfile='%s/%s.yml' % (plandir, role), overrides=overrides)

def add_image(self, url, pool, short=None, cmd=None, name=None, size=None):
    shortimage = os.path.basename(url).split('?')[0]
    if [i for i in self.glance.images.list() if i['name'] == shortimage]:
        return {'result': 'success'}
    if not os.path.exists('/tmp/%s' % shortimage):
        downloadcmd = "curl -Lo /tmp/%s -f '%s'" % (shortimage, url)
        code = os.system(downloadcmd)
        if code != 0:
            return {'result': 'failure', 'reason': "Unable to download indicated image"}
    if shortimage.endswith('gz'):
        if find_executable('gunzip') is not None:
            uncompresscmd = "gunzip /tmp/%s" % (shortimage)
            os.system(uncompresscmd)
        else:
            error("gunzip not found. Can't uncompress image")
            return {'result': 'failure', 'reason': "gunzip not found. Can't uncompress image"}
        shortimage = shortimage.replace('.gz', '')
    glanceimage = self.glance.images.create(name=shortimage, disk_format='qcow2', container_format='bare')
    self.glance.images.upload(glanceimage.id, open('/tmp/%s' % shortimage, 'rb'))
    os.remove('/tmp/%s' % shortimage)
    return {'result': 'success'}

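# A minimal usage sketch (hypothetical URL, not from the original source): given an
# already instantiated openstack provider object k, download a qcow2 cloud image
# and register it in glance under its basename.
k.add_image('https://example.com/images/mycloudimage.qcow2', pool='default')
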
def delete_disk(self, name=None, diskname=None, pool=None, novm=False):
    cinder = self.cinder
    nova = self.nova
    if name is None:
        volumes = [volume for volume in cinder.volumes.list() if volume.name == diskname]
        if volumes:
            volume = volumes[0]
        else:
            msg = "Disk %s not found" % diskname
            return {'result': 'failure', 'reason': msg}
        cinder.volumes.delete(volume.id)
        return {'result': 'success'}
    try:
        vm = nova.servers.find(name=name)
    except:
        error("VM %s not found" % name)
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    for disk in vm._info['os-extended-volumes:volumes_attached']:
        volume = cinder.volumes.get(disk['id'])
        if diskname == volume.name:
            for attachment in volume.attachments:
                if attachment['server_id'] == vm.id:
                    cinder.volumes.detach(volume, attachment['attachment_id'])
            cinder.volumes.delete(disk['id'])
    return {'result': 'success'}

def get_downstream_installer(nightly=False, macosx=False, tag=None):
    repo = 'ocp-dev-preview' if nightly else 'ocp'
    if tag is None:
        repo += '/latest'
    elif str(tag).count('.') == 1:
        repo += '/latest-%s' % tag
    else:
        repo += '/%s' % tag.replace('-x86_64', '')
    INSTALLSYSTEM = 'mac' if os.path.exists('/Users') or macosx else 'linux'
    msg = 'Downloading openshift-install from https://mirror.openshift.com/pub/openshift-v4/clients/%s' % repo
    pprint(msg)
    r = urlopen("https://mirror.openshift.com/pub/openshift-v4/clients/%s/release.txt" % repo).readlines()
    version = None
    for line in r:
        if 'Name' in str(line):
            version = str(line).split(':')[1].strip().replace('\\n', '').replace("'", "")
            break
    if version is None:
        error("Couldn't find version")
        return 1
    cmd = "curl -s https://mirror.openshift.com/pub/openshift-v4/clients/%s/" % repo
    cmd += "openshift-install-%s-%s.tar.gz " % (INSTALLSYSTEM, version)
    cmd += "| tar zxf - openshift-install"
    cmd += "; chmod 700 openshift-install"
    return call(cmd, shell=True)

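# A minimal usage sketch (not from the original source) showing how tag maps to a
# mirror path: None -> ocp/latest, a one-dot tag like '4.9' -> ocp/latest-4.9, and
# a full version such as '4.9.0' -> ocp/4.9.0.
get_downstream_installer()           # latest stable openshift-install
get_downstream_installer(tag='4.9')  # latest z-stream of the 4.9 channel
get_downstream_installer(tag='4.9.0')  # one exact release
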
def __init__(self, auth_token, project=None, debug=False, facility=None, tunnelhost=None,
             tunneluser='******', tunnelport=22, tunneldir='/var/www/html'):
    self.debug = debug
    self.conn = None
    self.tunnelhost = tunnelhost
    self.tunnelport = tunnelport
    self.tunneluser = tunneluser
    self.tunneldir = tunneldir
    self.facility = facility
    self.auth_token = auth_token
    conn = Manager(auth_token=auth_token)
    try:
        projects = [p.id for p in conn.list_projects() if p.name == project or p.id == project]
    except Error as e:
        error(e)
        return
    if projects:
        self.project = projects[0]
        self.conn = conn
    else:
        error("Invalid project %s" % project)
        return

def _uploadimage(self, pool, origin, directory, verbose=False, temp=False):
    if verbose:
        pprint("Uploading %s to %s/%s" % (origin, pool, directory))
    si = self.si
    rootFolder = self.rootFolder
    datastore = find(si, rootFolder, vim.Datastore, pool)
    if not datastore:
        return {'result': 'failure', 'reason': "Pool %s not found" % pool}
    destination = os.path.basename(origin)
    if temp:
        destination = "temp-%s" % destination
    url = "https://%s:443/folder/%s/%s?dcPath=%s&dsName=%s" % (self.vcip, directory, destination,
                                                               self.dc.name, pool)
    # Reuse the SOAP session cookie so the datastore file upload is authenticated
    client_cookie = si._stub.cookie
    cookie_name = client_cookie.split("=", 1)[0]
    cookie_value = client_cookie.split("=", 1)[1].split(";", 1)[0]
    cookie_path = client_cookie.split("=", 1)[1].split(";", 1)[1].split(";", 1)[0].lstrip()
    cookie_text = " " + cookie_value + "; $" + cookie_path
    cookie = {cookie_name: cookie_text}
    headers = {'Content-Type': 'application/octet-stream'}
    with open(origin, "rb") as f:
        if hasattr(requests.packages.urllib3, 'disable_warnings'):
            requests.packages.urllib3.disable_warnings()
        r = requests.put(url, data=f, headers=headers, cookies=cookie, verify=False)
        if verbose:
            if r.status_code not in [200, 201]:
                error("Got status %s with reason: %s" % (r.status_code, r.text))
            else:
                success("Successful upload of %s to %s" % (origin, pool))

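# A worked example (hypothetical values) of the cookie reconstruction above:
# given client_cookie = 'vmware_soap_session="52b5..."; Path=/; HttpOnly;'
# cookie_name  -> 'vmware_soap_session'
# cookie_value -> '"52b5..."'
# cookie_path  -> 'Path=/'
# cookie       -> {'vmware_soap_session': ' "52b5..."; $Path=/'}
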
def export(self, name, image=None):
    cinder = self.cinder
    nova = self.nova
    try:
        vm = nova.servers.find(name=name)
    except:
        error("VM %s not found" % name)
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    for disk in vm._info['os-extended-volumes:volumes_attached']:
        volume = cinder.volumes.get(disk['id'])
        for attachment in volume.attachments:
            newname = image if image is not None else volume.name.replace('-disk0', '')
            volume.upload_to_image(True, newname, 'bare', 'qcow2')
            status = ''
            timeout = 0
            while status != 'available':
                status = cinder.volumes.get(disk['id']).status
                pprint("Waiting 5 seconds for export to complete")
                sleep(5)
                timeout += 5
                if timeout >= 90:
                    error("Time out waiting for export to complete")
                    break
            break
    return {'result': 'success'}

def power_on(self):
    result = self.k.start(self.name)
    if result['result'] == 'success':
        self.powerstate = 'on'
        success('%s powered on!' % self.name)
    else:
        error('%s not powered on because %s' % (self.name, result['reason']))

def power_off(self):
    result = self.k.stop(self.name)
    if result['result'] == 'success':
        success('%s powered off!' % self.name)
        self.powerstate = 'off'
    else:
        error('%s not powered off because %s' % (self.name, result['reason']))

def restart(self, name):
    nova = self.nova
    try:
        vm = nova.servers.find(name=name)
    except:
        error("VM %s not found" % name)
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    vm.reboot()
    return {'result': 'success'}

def delete_network_port(self, name, network=None, floating=False):
    neutron = self.neutron
    matchingports = [i for i in neutron.list_ports()['ports'] if i['name'] == name]
    if not matchingports:
        msg = "Port %s not found" % name
        error(msg)
        return {'result': 'failure', 'reason': msg}
    self.neutron.delete_port(matchingports[0]['id'])
    return {'result': 'success'}

def info(self, name, output='plain', fields=[], values=False, vm=None, debug=False):
    translation = {'poweredOff': 'down', 'poweredOn': 'up', 'suspended': 'suspended'}
    yamlinfo = {}
    si = self.si
    dc = self.dc
    vmFolder = dc.vmFolder
    if vm is None:
        vm = findvm(si, vmFolder, name)
        if vm is None:
            error("VM %s not found" % name)
            return {}
    summary = vm.summary
    yamlinfo['name'] = name
    yamlinfo['id'] = summary.config.instanceUuid
    yamlinfo['cpus'] = vm.config.hardware.numCPU
    yamlinfo['memory'] = vm.config.hardware.memoryMB
    yamlinfo['status'] = translation[vm.runtime.powerState]
    yamlinfo['nets'] = []
    yamlinfo['disks'] = []
    devices = vm.config.hardware.device
    mainmac = None
    for number, dev in enumerate(devices):
        if "addressType" in dir(dev):
            network = dev.backing.deviceName
            device = dev.deviceInfo.label
            networktype = 'N/A'
            mac = dev.macAddress
            if mainmac is None:
                mainmac = mac
            net = {'device': device, 'mac': mac, 'net': network, 'type': networktype}
            yamlinfo['nets'].append(net)
        if type(dev).__name__ == 'vim.vm.device.VirtualDisk':
            device = "disk%s" % dev.unitNumber
            disksize = convert(1000 * dev.capacityInKB, GB=False)
            diskformat = dev.backing.diskMode
            drivertype = 'thin' if dev.backing.thinProvisioned else 'thick'
            path = dev.backing.datastore.name
            disk = {'device': device, 'size': int(disksize), 'format': diskformat, 'type': drivertype,
                    'path': path}
            yamlinfo['disks'].append(disk)
    if vm.runtime.powerState == "poweredOn":
        yamlinfo['host'] = vm.runtime.host.name
        for nic in vm.guest.net:
            currentmac = nic.macAddress
            currentips = nic.ipAddress
            if currentmac == mainmac and currentips:
                yamlinfo['ip'] = currentips[0]
    for entry in vm.config.extraConfig:
        if entry.key in METADATA_FIELDS:
            yamlinfo[entry.key] = entry.value
        if entry.key == 'image':
            yamlinfo['user'] = common.get_user(entry.value)
    if debug:
        yamlinfo['debug'] = vm.config.extraConfig
    return yamlinfo

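# A sketch of the dict shape info() returns (hypothetical values, not from the
# original source):
# {'name': 'myvm', 'id': '5029c440-...', 'cpus': 2, 'memory': 4096, 'status': 'up',
#  'nets': [{'device': 'Network adapter 1', 'mac': '00:50:56:xx:xx:xx', 'net': 'VM Network', 'type': 'N/A'}],
#  'disks': [{'device': 'disk0', 'size': 40, 'format': 'persistent', 'type': 'thin', 'path': 'datastore1'}],
#  'host': 'esxi1.example.com', 'ip': '192.168.122.10'}
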
def serialconsole(self, name, web=False):
    nova = self.nova
    try:
        vm = nova.servers.find(name=name)
    except:
        error("VM %s not found" % name)
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    cmd = vm.get_console_output()
    if web:
        return cmd
    print(cmd)
    return

def ssh(self, request, context):
    print("Handling ssh call for:\n%s" % request)
    config = Kconfig()
    k = config.k
    name = request.name
    l = request.l if request.l != '' else None
    r = request.r if request.r != '' else None
    X = request.X
    Y = request.Y
    D = request.D if request.D != '' else None
    user = request.user if request.user != '' else None
    cmd = request.cmd if request.cmd != '' else None
    tunnel = config.tunnel
    tunnelhost = config.tunnelhost
    if tunnel and tunnelhost is None:
        error("Tunnel requested but invalid tunnelhost")
        os._exit(1)
    tunnelport = config.tunnelport
    tunneluser = config.tunneluser
    insecure = config.insecure
    if '@' in name and len(name.split('@')) == 2:
        user = name.split('@')[0]
        name = name.split('@')[1]
    if os.path.exists("/i_am_a_container") and not os.path.exists("/root/.kcli/config.yml")\
            and not os.path.exists("/root/.ssh/config"):
        insecure = True
    u, ip, vmport = common._ssh_credentials(k, name)
    if ip is None:
        return kcli_pb2.sshcmd(sshcmd='')
    if user is None:
        user = config.vmuser if config.vmuser is not None else u
    if vmport is None and config.vmport is not None:
        vmport = config.vmport
    sshcmd = common.ssh(name, ip=ip, user=user, local=l, remote=r, tunnel=tunnel, tunnelhost=tunnelhost,
                        tunnelport=tunnelport, tunneluser=tunneluser, insecure=insecure, cmd=cmd, X=X, Y=Y,
                        D=D, vmport=vmport)
    response = kcli_pb2.sshcmd(sshcmd=sshcmd)
    return response

def update_metadata(self, name, metatype, metavalue, append=False):
    nova = self.nova
    try:
        vm = nova.servers.find(name=name)
    except:
        error("VM %s not found" % name)
        return
    metadata = vm.metadata
    if append and metatype in metadata:
        metadata[metatype] += ",%s" % metavalue
    else:
        metadata[metatype] = metavalue
    nova.servers.set_meta(vm.id, metadata)
    return {'result': 'success'}

def __init__(self, authdata, port, name, client):
    super(KBmc, self).__init__(authdata, port)
    self.bootdevice = 'default'
    self.k = Kconfig(client=client).k
    if not self.k.exists(name):
        error('%s not found. Leaving' % name)
        sys.exit(1)
    else:
        status = self.k.info(name)['status']
        self.powerstate = 'off' if status.lower() not in ['up', 'poweredon'] else 'on'
    pprint('Handling vm %s on port %s' % (name, port))
    pprint('Initial state for vm %s: %s' % (name, self.powerstate))
    self.name = name

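# A minimal usage sketch (hypothetical credentials, not from the original source):
# expose vm 'myvm' of the default kcli client as a pyghmi IPMI endpoint on port
# 6230, then serve requests the way pyghmi's fake bmc examples do.
mybmc = KBmc({'admin': 'password'}, 6230, 'myvm', None)
mybmc.listen()
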
def update_iso(self, name, iso):
    si = self.si
    dc = self.dc
    vmFolder = dc.vmFolder
    vm = findvm(si, vmFolder, name)
    isos = [i for i in self._getisos() if i.endswith(iso)]
    if not isos:
        error("Iso %s not found. Leaving..." % iso)
        return {'result': 'failure', 'reason': "Iso %s not found" % iso}
    else:
        iso = isos[0]
    if vm is None:
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    c = changecd(self.si, vm, iso)
    waitForMe(c)
    return {'result': 'success'}

def delete_dns(self, name, domain, instanceid=None):
    dns = self.dns
    cluster = None
    fqdn = "%s.%s" % (name, domain)
    if fqdn.split('-')[0] == fqdn.split('.')[1]:
        cluster = fqdn.split('-')[0]
        name = '.'.join(fqdn.split('.')[:1])
        domain = fqdn.replace("%s." % name, '').replace("%s." % cluster, '')
    zone = [z['Id'].split('/')[2] for z in dns.list_hosted_zones_by_name()['HostedZones']
            if z['Name'] == '%s.' % domain]
    if not zone:
        error("Domain not found")
        return {'result': 'failure', 'reason': "Domain not found"}
    zoneid = zone[0]
    dnsentry = name if cluster is None else "%s.%s" % (name, cluster)
    entry = "%s.%s." % (dnsentry, domain)
    ip = self.ip(instanceid)
    if ip is None:
        error("Couldn't get DNS ip for %s" % name)
        return
    recs = []
    clusterdomain = "%s.%s" % (cluster, domain)
    for record in dns.list_resource_record_sets(HostedZoneId=zoneid)['ResourceRecordSets']:
        if entry in record['Name'] or ('master-0' in name and record['Name'].endswith("%s." % clusterdomain)):
            recs.append(record)
        else:
            for rrdata in record['ResourceRecords']:
                if name in rrdata['Value']:
                    recs.append(record)
    changes = [{'Action': 'DELETE', 'ResourceRecordSet': record} for record in recs]
    try:
        dns.change_resource_record_sets(HostedZoneId=zoneid, ChangeBatch={'Changes': changes})
    except:
        pass
    return {'result': 'success'}

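# A worked example (hypothetical names) of the cluster detection above:
# name='myk8s-master-0', domain='myk8s.example.com' gives
# fqdn='myk8s-master-0.myk8s.example.com'; since fqdn.split('-')[0] ('myk8s')
# equals fqdn.split('.')[1] ('myk8s'), cluster becomes 'myk8s', name stays
# 'myk8s-master-0', and domain is rewritten to 'example.com'.
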
def console(self, name, tunnel=False, web=False):
    nova = self.nova
    try:
        vm = nova.servers.find(name=name)
    except:
        error("VM %s not found" % name)
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    url = vm.get_vnc_console('novnc')['console']['url']
    if web:
        return url
    if self.debug or os.path.exists("/i_am_a_container"):
        msg = "Open the following url:\n%s" % url if os.path.exists("/i_am_a_container") else url
        pprint(msg)
    else:
        pprint("Opening url: %s" % url)
        webbrowser.open(url, new=2, autoraise=True)
    return

def report(self):
    """
    :return:
    """
    projects = [proj for proj in self.conn.list_projects()
                if proj.name == self.project or proj.id == self.project]
    if not projects:
        error("Project %s not found" % self.project)
        return
    project = projects[0]
    print("Project name: %s" % project.name)
    print("Project id: %s" % project.id)
    if self.facility is not None:
        print("Facility: %s" % self.facility)
    print("Vms Running: %s" % len(self.conn.list_devices(self.project)))
    return

def delete_container(self, name):
    """
    :param self:
    :param name:
    :return:
    """
    try:
        pods = []
        rsname = None
        # Walk the Deployment -> ReplicaSet -> Pod ownership chain before deleting
        for rs in self.v1beta.list_namespaced_replica_set(self.namespace).items:
            owner_references = rs.metadata.owner_references
            if owner_references is None:
                continue
            ownerkind = owner_references[0].kind
            ownername = owner_references[0].name
            if ownerkind == 'Deployment' and ownername == name:
                rsname = rs.metadata.name
        for pod in self.core.list_namespaced_pod(self.namespace).items:
            owner_references = pod.metadata.owner_references
            if owner_references is None:
                continue
            ownerkind = owner_references[0].kind
            ownername = owner_references[0].name
            if ownerkind == 'ReplicaSet' and ownername == rsname:
                pods.append(pod.metadata.name)
        self.v1beta.delete_namespaced_deployment(name, self.namespace, client.V1DeleteOptions())
        if rsname is not None:
            self.v1beta.delete_namespaced_replica_set(rs.metadata.name, self.namespace,
                                                      client.V1DeleteOptions())
        for pod in pods:
            self.core.delete_namespaced_pod(pod, self.namespace, client.V1DeleteOptions())
    except client.rest.ApiException:
        try:
            self.core.delete_namespaced_pod(name, self.namespace, client.V1DeleteOptions())
        except client.rest.ApiException:
            error("Container %s not found" % name)
            return {'result': 'failure', 'reason': "Container %s not found" % name}
    return {'result': 'success'}

def update_flavor(self, name, flavor):
    conn = self.conn
    try:
        Filters = {'Name': "tag:Name", 'Values': [name]}
        vm = conn.describe_instances(Filters=[Filters])['Reservations'][0]['Instances'][0]
    except:
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    instanceid = vm['InstanceId']
    instancetype = vm['InstanceType']
    state = vm['State']['Name']
    if state != 'stopped':
        error("Can't update flavor of VM %s while up" % name)
        return {'result': 'failure', 'reason': "VM %s up" % name}
    if instancetype != flavor:
        conn.modify_instance_attribute(InstanceId=instanceid, Attribute='instanceType', Value=flavor,
                                       DryRun=False)
    return {'result': 'success'}

def scp(self, request, context):
    print("Handling scp call for:\n%s" % request)
    name = request.name
    recursive = request.recursive
    source = request.source
    destination = request.destination
    download = request.download
    user = request.user if request.user != '' else None
    config = Kconfig()
    k = config.k
    tunnel = config.tunnel
    tunnelhost = config.tunnelhost
    tunnelport = config.tunnelport
    tunneluser = config.tunneluser
    if tunnel and tunnelhost is None:
        error("Tunnel requested but invalid tunnelhost")
        os._exit(1)
    insecure = config.insecure
    u, ip, vmport = common._ssh_credentials(k, name)
    if ip is None:
        return
    if user is None:
        user = config.vmuser if config.vmuser is not None else u
    if vmport is None and config.vmport is not None:
        vmport = config.vmport
    scpcommand = common.scp(name, ip=ip, user=user, source=source, destination=destination, tunnel=tunnel,
                            tunnelhost=tunnelhost, tunnelport=tunnelport, tunneluser=tunneluser,
                            download=download, recursive=recursive, insecure=insecure, vmport=vmport)
    response = kcli_pb2.sshcmd(sshcmd=scpcommand)
    return response

def create(config, plandir, cluster, overrides, dnsconfig=None):
    k = config.k
    data = {'kubetype': 'kind'}
    data.update(overrides)
    if 'keys' not in overrides and get_ssh_pub_key() is None:
        error("No usable public key found, which is required for the deployment")
        sys.exit(1)
    data['cluster'] = overrides.get('cluster', cluster if cluster is not None else 'testk')
    plan = cluster if cluster is not None else data['cluster']
    data['kube'] = data['cluster']
    masters = data.get('masters', 1)
    if masters == 0:
        error("Invalid number of masters")
        sys.exit(1)
    clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
    if os.path.exists(clusterdir):
        error("Please remove existing directory %s first..." % clusterdir)
        sys.exit(1)
    if not os.path.exists(clusterdir):
        os.makedirs(clusterdir)
        os.mkdir("%s/auth" % clusterdir)
    with open("%s/kcli_parameters.yml" % clusterdir, 'w') as p:
        installparam = overrides.copy()
        installparam['plan'] = plan
        installparam['kubetype'] = 'kind'
        yaml.safe_dump(installparam, p, default_flow_style=False, encoding='utf-8', allow_unicode=True)
    result = config.plan(plan, inputfile='%s/kcli_plan.yml' % plandir, overrides=data)
    if result['result'] != 'success':
        sys.exit(1)
    kindnode = "%s-kind" % cluster
    kindnodeip, kindnodevmport = _ssh_credentials(k, kindnode)[1:]
    source, destination = data['KUBECONFIG'], "%s/auth/kubeconfig" % clusterdir
    scpcmd = scp(kindnode, ip=kindnodeip, user='******', source=source, destination=destination,
                 tunnel=config.tunnel, tunnelhost=config.tunnelhost, tunnelport=config.tunnelport,
                 tunneluser=config.tunneluser, download=True, insecure=True, vmport=kindnodevmport)
    os.system(scpcmd)
    success("Kubernetes cluster %s deployed!!!" % cluster)
    info2("export KUBECONFIG=$HOME/.kcli/clusters/%s/auth/kubeconfig" % cluster)
    info2("export PATH=$PWD:$PATH")