def update_memory(self, name, memory):
    """Set the memory of VM *name* to *memory* MBs (vbox backend).

    :param name: name of the VM
    :param memory: new memory size in MBs (coerced to int)
    :return: result dict with 'result' and optional 'reason'
    """
    conn = self.conn
    memory = int(memory)
    try:
        vm = conn.find_machine(name)
    except Exception:  # find_machine raises when the VM doesn't exist
        common.pprint("VM %s not found" % name, color='red')
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    # memory can only be changed while holding a write lock on the machine
    session = Session()
    vm.lock_machine(session, library.LockType.write)
    machine = session.machine
    machine.memory_size = memory
    machine.save_settings()
    session.unlock_machine()
    return {'result': 'success'}
def __init__(self, context=None, usecloning=False, host='127.0.0.1', port=22, user='******', debug=False):
    """Kubevirt client constructor.

    Loads the kube config, resolves the context and namespace to use and
    checks that the VirtualMachine CRD is present. ``self.conn`` is set to
    None when the requested context or Kubevirt itself is unavailable.
    """
    self.host = host
    self.port = port
    self.user = user
    self.usecloning = usecloning
    self.conn = 'OK'
    contexts, current = config.list_kube_config_contexts()
    if context is not None:
        # pick the requested context by name, if it exists
        contexts = [entry for entry in contexts if entry['name'] == context]
        if contexts:
            context = contexts[0]
            contextname = context['name']
        else:
            # NOTE(review): contextname stays unbound here, so the
            # load_kube_config call below would raise — confirm
            self.conn = None
    else:
        context = current
        contextname = current['name']
    config.load_kube_config(context=contextname)
    if 'namespace' in context['context']:
        self.namespace = context['context']['namespace']
    else:
        self.namespace = 'default'
    self.crds = client.CustomObjectsApi()
    extensions = client.ApiextensionsV1beta1Api()
    # kubevirt is considered installed when the virtualmachine CRD exists
    current_crds = [x for x in extensions.list_custom_resource_definition().to_dict()['items']
                    if x['spec']['names']['kind'].lower() == 'virtualmachine']
    if not current_crds:
        common.pprint("Kubevirt not installed", color='red')
        self.conn = None
        self.host = context
        return
    self.core = client.CoreV1Api()
    self.debug = debug
    # derive the api host from a contextname of the form cluster/host:port/user
    if host == '127.0.0.1' and len(contextname.split('/')) == 3 and len(
            contextname.split('/')[1].split(':')) == 2:
        self.host = contextname.split('/')[1].split(':')[0].replace('-', '.')
    return
def list_products(self, group=None, repo=None):
    """List the products available in the locally synced repos.

    :param group: only return products belonging to this group
    :param repo: only return products belonging to this repo
    :return: list of product dicts (with 'repo', 'group' and 'file' filled in)
    """
    configdir = "%s/.kcli" % os.environ.get('HOME')
    if not os.path.exists(configdir):
        return []
    products = []
    repodirs = [d.replace('repo_', '') for d in os.listdir(configdir)
                if os.path.isdir("%s/%s" % (configdir, d)) and d.startswith('repo_')]
    for rep in repodirs:
        repometa = "%s/repo_%s/KMETA" % (configdir, rep)
        if not os.path.exists(repometa):
            continue
        with open(repometa, 'r') as entries:
            try:
                # safe_load avoids arbitrary object construction from repo metadata
                repoproducts = yaml.safe_load(entries)
                for repoproduct in repoproducts:
                    repoproduct['repo'] = rep
                    if 'group' not in repoproduct:
                        repoproduct['group'] = 'notavailable'
                    if 'file' not in repoproduct:
                        repoproduct['file'] = 'kcli_plan.yml'
                    products.append(repoproduct)
            except yaml.scanner.ScannerError:
                common.pprint("Couldn't properly parse .kcli/repo. Leaving...", color='red')
                continue
    if repo is not None:
        products = [product for product in products if 'repo' in product and product['repo'] == repo]
    if group is not None:
        products = [product for product in products if 'group' in product and product['group'] == group]
    return products
def report(self):
    """Print a summary of the current project (name, id, facility, vm count).

    :return: None
    """
    projects = [proj for proj in self.conn.list_projects()
                if proj.name == self.project or proj.id == self.project]
    if not projects:
        # fix: pprint takes color=, not code=
        common.pprint("Project %s not found" % self.project, color='red')
        return
    project = projects[0]
    print("Project name: %s" % project.name)
    print("Project id: %s" % project.id)
    if self.facility is not None:
        print("Facility: %s" % self.facility)
    print("Vms Running: %s" % len(self.conn.list_devices(self.project)))
    return
def update_cpus(self, name, numcpus):
    """Set the number of cpus of VM *name* (vbox backend).

    :param name: name of the VM
    :param numcpus: new number of virtual cpus
    :return: result dict with 'result' and optional 'reason'
    """
    conn = self.conn
    try:
        vm = conn.find_machine(name)
    except Exception:  # find_machine raises when the VM doesn't exist
        common.pprint("VM %s not found" % name, color='red')
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    vm.cpu_count = numcpus
    vm.save_settings()
    return {'result': 'success'}
def create_profile(self, profile, overrides={}, quiet=False):
    """Add *profile* with the given *overrides* and persist profiles to disk.

    :param profile: name of the profile to create
    :param overrides: parameters stored for the profile (at least one required)
    :param quiet: don't report an already existing profile when True
    :return: result dict
    """
    if profile in self.profiles:
        if not quiet:
            common.pprint("Profile %s already there" % profile, color='blue')
        return {'result': 'success'}
    if not overrides:
        return {'result': 'failure', 'reason': "You need to specify at least one parameter"}
    profilesdir = os.path.expanduser('~/.kcli')
    profilespath = os.path.expanduser('~/.kcli/profiles.yml')
    self.profiles[profile] = overrides
    if not os.path.exists(profilesdir):
        os.makedirs(profilesdir)
    with open(profilespath, 'w') as destination:
        yaml.safe_dump(self.profiles, destination, default_flow_style=False, encoding='utf-8',
                       allow_unicode=True, sort_keys=False)
    return {'result': 'success'}
def enable_host(self, client):
    """Mark *client* as enabled in the kcli config and persist it.

    :param client: name of the client to enable
    :return: result dict
    """
    if client not in self.clients:
        common.pprint("Client %s not found in config.Leaving...." % client)
        return {'result': 'failure', 'reason': "Client %s not found in config" % client}
    common.pprint("Enabling client %s..." % client)
    self.ini[client]['enabled'] = True
    configpath = "%s/.kcli/config.yml" % os.environ.get('HOME')
    with open(configpath, 'w') as destination:
        yaml.safe_dump(self.ini, destination, default_flow_style=False, encoding='utf-8',
                       allow_unicode=True, sort_keys=False)
    return {'result': 'success'}
def status(self, name):
    """Return the GCE status string of VM *name*, or None when not found.

    :param name: name of the instance
    :return: status string or None
    """
    status = None
    conn = self.conn
    project = self.project
    zone = self.zone
    try:
        vm = conn.instances().get(zone=zone, project=project, instance=name).execute()
        status = vm['status']
    except Exception:  # the api raises when the instance doesn't exist
        common.pprint("Vm %s not found" % name, color='red')
    return status
def add_image(self, image, pool, short=None, cmd=None, name=None, size=1):
    """Import a disk image into a pvc so it can be used as a template (kubevirt).

    :param image: url of the image to download
    :param pool: storage class to use for the pvc
    :param short: unused here; kept for interface compatibility with other providers
    :param cmd: unused here; kept for interface compatibility with other providers
    :param name: name for the resulting volume; derived from TEMPLATES when None
    :param size: pvc size in Gi; overridden per-distro for known qcow2 images
    :return: result dict with 'result' and optional 'reason'
    """
    # minimum pvc sizes (in Gi) needed to hold the uncompressed image per distro
    sizes = {'debian': 2, 'centos': 8, 'fedora': 4, 'rhel': 10, 'trusty': 2.2, 'xenial': 2.2,
             'yakkety': 2.2, 'zesty': 2.2, 'artful': 2.2}
    core = self.core
    pool = self.check_pool(pool)
    namespace = self.namespace
    shortimage = os.path.basename(image).split('?')[0]
    if name is None:
        volname = [k for k in TEMPLATES if TEMPLATES[k] == image][0]
    else:
        # pvc names must be dns-compatible
        volname = name.replace('_', '-').replace('.', '-').lower()
    for key in sizes:
        if key in shortimage and shortimage.endswith('qcow2'):
            size = sizes[key]
            break
    # NOTE(review): format uses %M (minutes) where the month was likely intended — confirm
    now = datetime.datetime.now().strftime("%Y%M%d%H%M")
    podname = '%s-%s-importer' % (now, volname)
    pvc = {'kind': 'PersistentVolumeClaim',
           'spec': {'storageClassName': pool, 'accessModes': ['ReadWriteOnce'],
                    'resources': {'requests': {'storage': '%sGi' % size}}},
           'apiVersion': 'v1',
           'metadata': {'name': volname, 'annotations': {'kcli/template': shortimage}}}
    # one-shot importer pod downloading the image into the pvc
    pod = {'kind': 'Pod',
           'spec': {'restartPolicy': 'Never',
                    'containers': [{'image': 'kubevirtci/disk-importer',
                                    'volumeMounts': [{'mountPath': '/storage', 'name': 'storage1'}],
                                    'name': 'importer',
                                    'env': [{'name': 'CURL_OPTS', 'value': '-L'},
                                            {'name': 'INSTALL_TO', 'value': '/storage/disk.img'},
                                            {'name': 'URL', 'value': image}]}],
                    'volumes': [{'name': 'storage1',
                                 'persistentVolumeClaim': {'claimName': volname}}]},
           'apiVersion': 'v1', 'metadata': {'name': podname}}
    try:
        core.read_namespaced_persistent_volume_claim(volname, namespace)
        common.pprint("Using existing pvc")
    except:
        # pvc doesn't exist yet: create it and wait until it gets bound
        core.create_namespaced_persistent_volume_claim(namespace, pvc)
        bound = self.pvc_bound(volname, namespace)
        if not bound:
            return {'result': 'failure', 'reason': 'timeout waiting for pvc to get bound'}
    core.create_namespaced_pod(namespace, pod)
    completed = self.pod_completed(podname, namespace)
    if not completed:
        common.pprint("Issue with pod %s. Leaving it for debugging purposes" % podname, color='red')
        return {'result': 'failure', 'reason': 'timeout waiting for importer pod to complete'}
    else:
        # import succeeded: clean the importer pod up
        core.delete_namespaced_pod(podname, namespace, client.V1DeleteOptions())
    return {'result': 'success'}
def bootstrap(self, name, host, port, user, protocol, url, pool, poolpath):
    """Generate an initial ~/.kcli/config.yml, backing up any existing one.

    :param name: client name to use for a remote host (defaults to *host*)
    :param host: hypervisor host; local setup is assumed when None and no url
    :param port: optional connection port for the remote client
    :param user: optional connection user for the remote client
    :param protocol: optional connection protocol for the remote client
    :param url: optional libvirt url
    :param pool: default pool name (defaults to 'default')
    :param poolpath: accepted for interface compatibility; not written to the config
    """
    common.pprint("Bootstrapping env", color='green')
    if host is None and url is None:
        url = 'qemu:///system'
        host = '127.0.0.1'
    if pool is None:
        pool = 'default'
    if poolpath is None:
        poolpath = '/var/lib/libvirt/images'
    if host == '127.0.0.1':
        # fix: 'reservehost' appeared twice in this literal
        ini = {'default': {'client': 'local', 'cloudinit': True, 'tunnel': False, 'reservehost': False,
                           'insecure': True, 'enableroot': True, 'reserveip': False, 'reservedns': False,
                           'nested': True, 'start': True},
               'local': {'pool': pool, 'nets': ['default']}}
        if not sys.platform.startswith('linux'):
            ini['local']['type'] = 'vbox'
    else:
        if name is None:
            name = host
        # fix: 'reservehost' appeared twice in this literal
        ini = {'default': {'client': name, 'cloudinit': True, 'tunnel': True, 'reservehost': False,
                           'insecure': True, 'enableroot': True, 'reserveip': False, 'reservedns': False,
                           'nested': True, 'start': True}}
        ini[name] = {'host': host, 'pool': pool, 'nets': ['default']}
        if protocol is not None:
            ini[name]['protocol'] = protocol
        if user is not None:
            ini[name]['user'] = user
        if port is not None:
            ini[name]['port'] = port
        if url is not None:
            ini[name]['url'] = url
    path = os.path.expanduser('~/.kcli/config.yml')
    rootdir = os.path.expanduser('~/.kcli')
    if os.path.exists(path):
        copyfile(path, "%s.bck" % path)
    if not os.path.exists(rootdir):
        os.makedirs(rootdir)
    with open(path, 'w') as conf_file:
        yaml.safe_dump(ini, conf_file, default_flow_style=False, encoding='utf-8', allow_unicode=True)
    common.pprint("Environment bootstrapped!", color='green')
def create_network(self, name, cidr=None, dhcp=True, nat=True, domain=None, plan='kvirt', overrides={}):
    """Create a neutron network, plus optional subnet and router wiring (openstack).

    :param name: name of the network
    :param cidr: cidr for the associated subnet; no subnet is created when None
    :param dhcp: enable dhcp on the subnet
    :param nat: wire the network to the external network through the 'kvirt' router
    :param domain: used as dns nameserver for the subnet when set
    :param plan: unused here; kept for interface compatibility with other providers
    :param overrides: extra settings; only 'port_security_enabled' is honored
    :return: result dict with 'result' and optional 'reason'
    """
    if nat:
        # locate the external network and the shared 'kvirt' router, if any
        externalnets = [n for n in self.neutron.list_networks()['networks'] if n['router:external']]
        externalnet_id = externalnets[0]['id'] if externalnets else None
        routers = [router for router in self.neutron.list_routers()['routers'] if router['name'] == 'kvirt']
        router_id = routers[0]['id'] if routers else None
    try:
        IPNetwork(cidr)
    except:
        return {'result': 'failure', 'reason': "Invalid Cidr %s" % cidr}
    neutron = self.neutron
    network_id = None
    networks = {net['name']: net['id'] for net in neutron.list_networks()['networks']}
    if name not in networks:
        network = {'name': name, 'admin_state_up': True}
        if 'port_security_enabled' in overrides:
            network['port_security_enabled'] = bool(overrides['port_security_enabled'])
        network = neutron.create_network({'network': network})
        network_id = network['network']['id']
        tenant_id = network['network']['tenant_id']
    else:
        common.pprint("Network already there. Creating subnet", color='blue')
    if cidr is not None:
        if network_id is None:
            network_id = networks[name]
        cidrs = [s['cidr'] for s in neutron.list_subnets()['subnets'] if s['network_id'] == network_id]
        if cidr not in cidrs:
            subnet = {'name': cidr, 'network_id': network_id, 'ip_version': 4, "cidr": cidr,
                      'enable_dhcp': dhcp}
            if domain is not None:
                subnet['dns_nameservers'] = [domain]
            subnet = neutron.create_subnet({'subnet': subnet})
            subnet_id = subnet['subnet']['id']
            tenant_id = subnet['subnet']['tenant_id']
        else:
            common.pprint("Subnet already there. Leaving", color='blue')
            return {'result': 'success'}
    if nat:
        # NOTE(review): subnet_id (and possibly tenant_id) are unbound when cidr is
        # None and nat is requested, which would raise NameError here — confirm callers
        if externalnet_id is not None:
            if router_id is None:
                router = {'name': 'kvirt', 'tenant_id': tenant_id}
                # router['external_gateway_info'] = {"network_id": externalnet_id, "enable_snat": True}
                router = neutron.create_router({'router': router})
                router_id = router['router']['id']
                router_dict = {"network_id": externalnet_id}
                neutron.add_gateway_router(router_id, router_dict)
            neutron.add_interface_router(router_id, {'subnet_id': subnet_id})
    return {'result': 'success'}
def info_product(self, name, repo=None, group=None, web=False):
    """Print (or return, when *web* is set) info about product *name*.

    :param name: name of the product
    :param repo: restrict the search to this repo
    :param group: restrict the search to this group
    :param web: return a dict instead of printing
    :return: dict with product data when *web* is set, None otherwise
    """
    # fix: the first branch iterated the bound method (missing call parens) and
    # the group filter was a bare `if`, clobbering the repo-only result
    if repo is not None and group is not None:
        products = [product for product in self.list_products()
                    if product['name'] == name and product['repo'] == repo and product['group'] == group]
    elif repo is not None:
        products = [product for product in self.list_products()
                    if product['name'] == name and product['repo'] == repo]
    elif group is not None:
        products = [product for product in self.list_products()
                    if product['name'] == name and product['group'] == group]
    else:
        products = [product for product in self.list_products() if product['name'] == name]
    if len(products) == 0:
        common.pprint("Product not found. Leaving...", color='red')
        os._exit(1)
    elif len(products) > 1:
        common.pprint("Product found in several places. Specify repo or group", color='red')
        os._exit(1)
    else:
        product = products[0]
        repo = product['repo']
        repodir = "%s/.kcli/plans/%s" % (os.environ.get('HOME'), repo)
        group = product['group']
        _file = product['file']
        description = product.get('description')
        numvms = product.get('numvms')
        image = product.get('image')
        comments = product.get('comments')
        if not web:
            if description is not None:
                print("description: %s" % description)
            if group is not None:
                print("group: %s" % group)
            if numvms is not None:
                numvmsinfo = "numvms: %s" % numvms
                if numvms == 1:
                    numvmsinfo += " (Vm name can be overriden)"
                print(numvmsinfo)
            if image is not None:
                print("image: %s" % image)
            if comments is not None:
                print("Comments : %s" % comments)
        inputfile = "%s/%s" % (product['realdir'], _file) if 'realdir' in product else _file
        parameters = self.info_plan("%s/%s" % (repodir, inputfile), quiet=True, web=web)
        if web:
            return {'product': product, 'comments': comments, 'description': description,
                    'parameters': parameters}
def info(self, name, output='plain', fields=None, values=False):
    """Print detailed info for VM *name* (openstack backend).

    :param name: name of the VM
    :param output: output format handed to common.print_info
    :param fields: comma separated list of fields to restrict the output to
    :param values: print only values, not keys
    :return: result dict
    """
    if fields is not None:
        fields = fields.split(',')
    nova = self.nova
    try:
        server = nova.servers.find(name=name)
    except:
        common.pprint("VM %s not found" % name, color='red')
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    if self.debug:
        print(vars(server))
    flavor = nova.flavors.get(server.flavor['id'])
    yamlinfo = {'name': server.name,
                'status': server.status,
                'template': self.glance.images.get(server.image['id']).name,
                'memory': flavor.ram,
                'cpus': flavor.vcpus,
                'nets': []}
    nicindex = 0
    for netname in list(server.addresses):
        for address in server.addresses[netname]:
            mac = address['OS-EXT-IPS-MAC:mac_addr']
            if address['OS-EXT-IPS:type'] == 'floating':
                # the floating address is reported as the vm ip
                yamlinfo['ip'] = address['addr']
            else:
                yamlinfo['nets'].append({'device': 'eth%s' % nicindex, 'mac': mac,
                                         'net': netname, 'type': address['addr']})
                nicindex += 1
    metadata = server.metadata
    if metadata is not None:
        if 'plan' in metadata:
            yamlinfo['plan'] = metadata['plan']
        if 'profile' in metadata:
            yamlinfo['profile'] = metadata['profile']
    common.print_info(yamlinfo, output=output, fields=fields, values=values)
    return {'result': 'success'}
def create_loadbalancer(self, name, ports=[], checkpath='/index.html', vms=[], domain=None, checkport=80,
                        alias=[], internal=False):
    """Create a classic ELB plus its security group and register *vms* behind it (aws).

    :param name: load balancer (and security group) name
    :param ports: list of ports to listen on
    :param checkpath: path used for the health check when port 80 is exposed
    :param vms: vms to register as instances behind the balancer
    :param domain: unused here; kept for interface compatibility with other providers
    :param checkport: unused here; kept for interface compatibility with other providers
    :param alias: unused here; kept for interface compatibility with other providers
    :param internal: unused here; kept for interface compatibility with other providers
    """
    ports = [int(port) for port in ports]
    resource = self.resource
    conn = self.conn
    elb = self.elb
    # well-known ports map to http(s) listeners; anything else is plain tcp
    protocols = {80: 'HTTP', 8080: 'HTTP', 443: 'HTTPS'}
    Listeners = []
    for port in ports:
        protocol = protocols[port] if port in protocols else 'TCP'
        Listener = {'Protocol': protocol, 'LoadBalancerPort': port, 'InstanceProtocol': protocol,
                    'InstancePort': port}
        Listeners.append(Listener)
    AvailabilityZones = ["%s%s" % (self.region, i) for i in ['a', 'b', 'c']]
    lb = elb.create_load_balancer(LoadBalancerName=name, Listeners=Listeners,
                                  AvailabilityZones=AvailabilityZones)
    sg = resource.create_security_group(GroupName=name, Description=name)
    sgid = sg.id
    sgtags = [{"Key": "Name", "Value": name}]
    sg.create_tags(Tags=sgtags)
    for port in ports:
        # open each listener port to the world
        sg.authorize_ingress(GroupName=name, FromPort=port, ToPort=port, IpProtocol='tcp',
                             CidrIp="0.0.0.0/0")
    if 80 in ports:
        HealthTarget = 'HTTP:80%s' % checkpath
    else:
        # NOTE(review): protocol/port here are the last loop values and are unbound
        # when ports is empty — confirm callers always pass at least one port
        HealthTarget = '%s:%s' % (protocol, port)
    HealthCheck = {'Interval': 20, 'Target': HealthTarget, 'Timeout': 3, 'UnhealthyThreshold': 10,
                   'HealthyThreshold': 2}
    elb.configure_health_check(LoadBalancerName=name, HealthCheck=HealthCheck)
    common.pprint("Reserved dns name %s" % lb['DNSName'])
    if vms:
        Instances = []
        for vm in vms:
            # tag the vm with the loadbalancer it belongs to
            update = self.update_metadata(vm, 'loadbalancer', name, append=True)
            instanceid = self.get_id(vm)
            if update == 0 and instanceid is not None:
                Instances.append({"InstanceId": instanceid})
            sgs = self.get_security_groups(vm)
            sgnames = [x['GroupName'] for x in sgs]
            if name not in sgnames:
                # attach the balancer's security group to the instance
                sgids = [x['GroupId'] for x in sgs]
                sgids.append(sgid)
                conn.modify_instance_attribute(InstanceId=instanceid, Groups=sgids)
        if Instances:
            elb.register_instances_with_load_balancer(LoadBalancerName=name, Instances=Instances)
    return
def ip(self, name):
    """Return the public (nat) ip of VM *name*, or None (gce backend).

    :param name: name of the instance
    :return: ip string or None
    """
    ip = None
    conn = self.conn
    project = self.project
    zone = self.zone
    try:
        vm = conn.instances().get(zone=zone, project=project, instance=name).execute()
    except Exception:  # the api raises when the instance doesn't exist
        common.pprint("Vm %s not found" % name, color='red')
        return None
    if 'natIP' not in vm['networkInterfaces'][0]['accessConfigs'][0]:
        return None
    else:
        ip = vm['networkInterfaces'][0]['accessConfigs'][0]['natIP']
    return ip
def delete(self, name, snapshots=False):
    """Remove VM *name*, stopping it first when it is up (ovirt backend).

    :param name: name of the VM
    :param snapshots: unused here; kept for interface compatibility
    :return: result dict
    """
    matches = self.vms_service.list(search='name=%s' % name)
    if not matches:
        common.pprint("VM %s not found" % name, color='red')
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    found = matches[0]
    service = self.vms_service.vm_service(found.id)
    if str(found.status) == 'up':
        service.stop()
    service.remove()
    return {'result': 'success'}
def serialconsole(self, name):
    """Attach to the serial console of VM *name* through nc (vbox backend).

    :param name: name of the VM
    :return: result dict on failure, None otherwise
    """
    conn = self.conn
    try:
        vm = conn.find_machine(name)
    except:
        common.pprint("VM %s not found" % name, color='red')
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    # NOTE(review): str(vm.state) is unlikely to ever be empty, so this
    # down-check looks ineffective — confirm the intended state comparison
    if not str(vm.state):
        common.pprint("VM down", color='red')
        return {'result': 'failure', 'reason': "VM %s down" % name}
    else:
        serial = vm.get_serial_port(0)
        if not serial.enabled:
            print("No serial Console found. Leaving...")
            return
        # serial.path is expected to hold a local tcp port to connect to
        serialport = serial.path
        os.system("nc 127.0.0.1 %s" % serialport)
def console(self, name, tunnel=False):
    """Open the gui console of VM *name* when it is down (vbox backend).

    :param name: name of the VM
    :param tunnel: unused here; kept for interface compatibility
    :return: result dict on failure, None otherwise
    """
    try:
        machine = self.conn.find_machine(name)
    except:
        common.pprint("VM %s not found" % name, color='red')
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    if self.status(name) != 'down':
        print("VM %s already running in headless mode.Use kcli console -s instead" % name)
        return
    machine.launch_vm_process(None, 'gui', '')
def update_profile(self, profile, overrides={}, quiet=False):
    """Merge *overrides* into an existing profile and persist profiles to disk.

    :param profile: name of the profile to update
    :param overrides: parameters to merge into the profile (at least one required)
    :param quiet: don't report a missing profile when True
    :return: result dict
    """
    if profile not in self.profiles:
        # fix: the message was only printed when quiet was set, i.e. inverted
        # (create_profile uses the same `if not quiet` convention)
        if not quiet:
            common.pprint("Profile %s not found" % profile, color='red')
        return {'result': 'failure', 'reason': 'Profile %s not found' % profile}
    if not overrides:
        return {'result': 'failure', 'reason': "You need to specify at least one parameter"}
    path = os.path.expanduser('~/.kcli/profiles.yml')
    self.profiles[profile].update(overrides)
    with open(path, 'w') as profile_file:
        try:
            yaml.safe_dump(self.profiles, profile_file, default_flow_style=False, encoding='utf-8',
                           allow_unicode=True, sort_keys=False)
        except TypeError:
            # older pyyaml releases don't accept sort_keys
            yaml.safe_dump(self.profiles, profile_file, default_flow_style=False, encoding='utf-8',
                           allow_unicode=True)
    return {'result': 'success'}
def process_inputfile(self, plan, inputfile, overrides={}, onfly=None, full=False, ignore=False,
                      download_mode=False):
    """Render *inputfile* as a jinja template with the merged overrides.

    :param plan: plan name, injected into the template context as 'plan'
    :param inputfile: path of the plan file to render
    :param overrides: caller overrides; parameter defaults from the file (and its
                      baseplan, if any) are merged in without clobbering them
    :param onfly: base url to fetch a remote baseplan from, when set
    :param full: when True also parse the rendered output as yaml and return
                 (entries, overrides, basefile, basedir); otherwise return the
                 rendered text with the parameters section stripped
    :param ignore: tolerate undefined template variables instead of failing
    :param download_mode: force boolean parameters to True (used for downloads)
    """
    basedir = os.path.dirname(inputfile) if os.path.dirname(inputfile) != '' else '.'
    basefile = None
    undefined = strictundefined if not ignore else defaultundefined
    env = Environment(loader=FileSystemLoader(basedir), undefined=undefined)
    try:
        templ = env.get_template(os.path.basename(inputfile))
    except TemplateSyntaxError as e:
        common.pprint("Error rendering line %s of file %s. Got: %s" % (e.lineno, e.filename, e.message),
                      color='red')
        os._exit(1)
    except TemplateError as e:
        common.pprint("Error rendering file %s. Got: %s" % (inputfile, e.message), color='red')
        os._exit(1)
    parameters = common.get_parameters(inputfile)
    if parameters is not None:
        parameters = yaml.safe_load(parameters)['parameters']
        if not isinstance(parameters, dict):
            common.pprint("Error rendering parameters section of file %s" % inputfile, color='red')
            os._exit(1)
        for parameter in parameters:
            if parameter == 'baseplan':
                # pull the baseplan's parameters in as lowest-priority defaults
                basefile = parameters['baseplan']
                if onfly is not None:
                    common.fetch("%s/%s" % (onfly, basefile), '.')
                baseparameters = common.get_parameters(basefile)
                if baseparameters is not None:
                    baseparameters = yaml.safe_load(baseparameters)['parameters']
                    for baseparameter in baseparameters:
                        if baseparameter not in overrides and baseparameter not in parameters:
                            overrides[baseparameter] = baseparameters[baseparameter]
            elif parameter not in overrides:
                currentparameter = parameters[parameter]
                if isinstance(currentparameter, bool) and download_mode:
                    currentparameter = True
                overrides[parameter] = currentparameter
    with open(inputfile, 'r') as entries:
        overrides.update(self.overrides)
        overrides.update({'plan': plan})
        try:
            entries = templ.render(overrides)
        except TemplateError as e:
            common.pprint("Error rendering inputfile %s. Got: %s" % (inputfile, e.message), color='red')
            os._exit(1)
        if not full:
            # strip the leading parameters: section from the rendered text
            entrieslist = entries.split('\n')
            if entrieslist[0].startswith('parameters:'):
                for index, line in enumerate(entrieslist[1:]):
                    if re.match(r'\S', line):
                        entries = '\n'.join(entrieslist[index + 1:])
                        break
            return entries
        entries = yaml.safe_load(entries)
    return entries, overrides, basefile, basedir
def ip(self, name):
    """Return the first reported ip of VM *name*, exiting when it is missing (kubevirt).

    :param name: name of the VM
    :return: ip string or None
    """
    ip = None
    try:
        vm = self.crds.get_namespaced_custom_object(DOMAIN, VERSION, self.namespace,
                                                    'virtualmachines', name)
        status = vm['status']
        if 'interfaces' in status:
            for nic in vm['status']['interfaces']:
                if 'ipAddress' in nic:
                    ip = nic['ipAddress']
                    break
    except Exception:
        common.pprint("VM %s not found" % name, color='red')
        # return {'result': 'failure', 'reason': "VM %s not found" % name}
        os._exit(1)
    return ip
def console(self, name, tunnel=False):
    """Open the novnc console of VM *name* in a browser (openstack backend).

    :param name: name of the VM
    :param tunnel: unused here; kept for interface compatibility
    :return: result dict on failure, None otherwise
    """
    try:
        server = self.nova.servers.find(name=name)
    except:
        common.pprint("VM %s not found" % name, color='red')
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    consoleurl = server.get_vnc_console('novnc')['console']['url']
    if self.debug:
        print(consoleurl)
    webbrowser.open(consoleurl, new=2, autoraise=True)
    return
def update_metadata(self, name, metatype, metavalue):
    """Set metadata key *metatype* to *metavalue* on VM *name* (openstack backend).

    :param name: name of the VM
    :param metatype: metadata key
    :param metavalue: metadata value
    :return: result dict
    """
    nova = self.nova
    try:
        vm = nova.servers.find(name=name)
    except Exception:  # find raises when the VM doesn't exist
        common.pprint("VM %s not found" % name, color='red')
        # consistency: return a failure dict like the sibling methods instead of a bare None
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    metadata = vm.metadata
    metadata[metatype] = metavalue
    nova.servers.set_meta(vm.id, metadata)
    return {'result': 'success'}
def console(self, name, tunnel=False, web=False):
    """Return or open the novnc console url of VM *name* (openstack backend).

    :param name: name of the VM
    :param tunnel: unused here; kept for interface compatibility
    :param web: return the url instead of opening it
    :return: url when *web* is set, failure dict when the vm is missing, None otherwise
    """
    try:
        server = self.nova.servers.find(name=name)
    except:
        error("VM %s not found" % name)
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    consoleurl = server.get_vnc_console('novnc')['console']['url']
    if web:
        return consoleurl
    if self.debug or os.path.exists("/i_am_a_container"):
        # inside a container the browser can't be launched; just print the url
        msg = "Open the following url:\n%s" % consoleurl if os.path.exists("/i_am_a_container") else consoleurl
        pprint(msg)
    else:
        pprint("Opening url: %s" % consoleurl)
        webbrowser.open(consoleurl, new=2, autoraise=True)
    return
def delete_dns(self, name, domain, instanceid=None):
    """Delete the A record of *name* in hosted zone *domain* (aws route53).

    :param name: dns name (without the domain part)
    :param domain: hosted zone name
    :param instanceid: instance whose ip is recorded, handed to self.ip
    :return: result dict, or None when the ip can't be resolved
    """
    dns = self.dns
    zone = [z['Id'].split('/')[2] for z in dns.list_hosted_zones_by_name()['HostedZones']
            if z['Name'] == '%s.' % domain]
    if not zone:
        common.pprint("Domain not found", color='red')
        return {'result': 'failure', 'reason': "Domain not found"}
    zoneid = zone[0]
    entry = "%s.%s." % (name, domain)
    ip = self.ip(instanceid)
    if ip is None:
        print("Couldn't Get DNS Ip")
        return
    # route53 requires the exact record (including its value) to delete it
    changes = [{'Action': 'DELETE', 'ResourceRecordSet': {'Name': entry, 'Type': 'A', 'TTL': 300,
                                                          'ResourceRecords': [{'Value': ip}]}}]
    # fix: entry was needlessly recomputed here
    dns.change_resource_record_sets(HostedZoneId=zoneid, ChangeBatch={'Changes': changes})
    return {'result': 'success'}
def list_repos(self):
    """Return the dict of configured repos from ~/.kcli/repos.yml.

    :return: dict of repos (empty when the file is missing or empty)
    """
    reposfile = "%s/.kcli/repos.yml" % os.environ.get('HOME')
    if not os.path.exists(reposfile) or os.path.getsize(reposfile) == 0:
        repos = {}
    else:
        with open(reposfile, 'r') as entries:
            try:
                # safe_load avoids arbitrary object construction from the config
                repos = yaml.safe_load(entries)
            except yaml.scanner.ScannerError:
                common.pprint("Couldn't properly parse .kcli/repos.yml. Leaving...", color='red')
                os._exit(1)
    return repos
def volumes(self, iso=False):
    """List the templates available as pvcs (kubevirt backend).

    :param iso: isos are not supported on this backend, so an empty list is returned
    :return: sorted template list, or the default registry disks when no pvc matches
    """
    core = self.core
    namespace = self.namespace
    if iso:
        return []
    claims = core.list_namespaced_persistent_volume_claim(namespace)
    found = sorted(item.metadata.annotations['kcli/template'] for item in claims.items
                   if item.metadata.annotations is not None and
                   'kcli/template' in item.metadata.annotations)
    if found:
        return found
    common.pprint("No pvc based templates found, defaulting to registry disks", color='blue')
    return REGISTRYDISKS
def get_ci_installer(pull_secret, tag=None, macosx=False):
    """Download the openshift-install binary matching *tag* from the ci registry.

    :param pull_secret: registry config used by oc to pull the release
    :param tag: release tag; the latest ci tag is resolved when None
    :param macosx: unused here; kept for interface compatibility
    """
    if tag is None:
        # scrape the ci release graph for the most recent tag
        tags = []
        for line in urlopen("https://openshift-release.svc.ci.openshift.org/graph?format=dot").readlines():
            tag_match = re.match('.*label="(.*.)", shape=.*', str(line))
            if tag_match is not None:
                tags.append(tag_match.group(1))
        tag = sorted(tags)[-1]
    if '/' not in str(tag):
        tag = 'registry.svc.ci.openshift.org/ocp/release:%s' % tag
    os.environ['OPENSHIFT_RELEASE_IMAGE'] = tag
    binary = 'openshift-install'
    msg = 'Downloading %s %s in current directory' % (binary, tag)
    pprint(msg, color='blue')
    cmd = "oc adm release extract --registry-config %s --command=%s --to . %s" % (pull_secret, binary, tag)
    cmd += "; chmod 700 %s" % binary
    call(cmd, shell=True)
def restart(self, name):
    """Reboot VM *name*, or start it when it is down (ovirt backend).

    :param name: name of the VM
    :return: result dict
    """
    matches = self.vms_service.list(search='name=%s' % name)
    if not matches:
        common.pprint("VM %s not found" % name, color='red')
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    state = str(matches[0].status)
    service = self.vms_service.vm_service(matches[0].id)
    if state == 'down':
        service.start()
    else:
        service.reboot()
    return {'result': 'success'}
def update_cpus(self, name, numcpus):
    """Resize VM *name* to a flavor with at least *numcpus* cpus (openstack).

    :param name: name of the VM
    :param numcpus: desired number of cpus
    :return: result dict
    """
    nova = self.nova
    try:
        vm = nova.servers.find(name=name)
    except:
        error("VM %s not found" % name)
        return {'result': 'failure', 'reason': "VM %s not found" % name}
    currentflavor = nova.flavors.get(vm.flavor['id'])
    if currentflavor.vcpus >= numcpus:
        warning("No need to resize")
        return {'result': 'success'}
    # candidate flavors: at least as much ram as now and enough cpus
    allflavors = [f for f in nova.flavors.list() if f != currentflavor]
    flavors = [flavor for flavor in allflavors if flavor.ram >= currentflavor.ram and
               flavor.vcpus >= numcpus]
    if flavors:
        flavor = flavors[0]
        pprint("Using flavor %s" % flavor.name)
        vm.resize(flavor.id)
        # poll (2s steps, 40s budget) until the resize reaches VERIFY_RESIZE,
        # then confirm it so openstack commits the new flavor
        resizetimeout = 40
        resizeruntime = 0
        vmstatus = ''
        while vmstatus != 'VERIFY_RESIZE':
            if resizeruntime >= resizetimeout:
                error("Time out waiting for resize to finish")
                return {'result': 'failure', 'reason': "Time out waiting for resize to finish"}
            vm = nova.servers.find(name=name)
            vmstatus = vm.status
            sleep(2)
            pprint("Waiting for vm %s to be in verify_resize" % name)
            resizeruntime += 2
        vm.confirm_resize()
        return {'result': 'success'}
    else:
        error("Couldn't find matching flavor for this number of cpus")
        return {'result': 'failure',
                'reason': "Couldn't find matching flavor for this number of cpus"}