def create(self, name, virttype='kvm', profile='', plan='kvirt', cpumodel='Westmere', cpuflags=[], numcpus=2, memory=512, guestid='guestrhel764', pool='default', template=None, disks=[{ 'size': 10 }], disksize=10, diskthin=True, diskinterface='virtio', nets=['default'], iso=None, vnc=False, cloudinit=True, reserveip=False, reservedns=False, reservehost=False, start=True, keys=None, cmds=[], ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None, tunnel=False, files=[], enableroot=True, alias=[], overrides={}, tags={}):
    """Create a vm on this provider.

    Only the cloudinit payload is actually produced here; every other
    parameter of the generic kvirt create interface is accepted for
    signature compatibility and otherwise ignored by this backend.

    Returns {'result': 'success'} unconditionally.
    """
    if cloudinit:
        # Generate the cloudinit data only (iso=False: no local iso image).
        cloudinit_args = dict(name=name, keys=keys, cmds=cmds, nets=nets,
                              gateway=gateway, dns=dns, domain=domain,
                              reserveip=reserveip, files=files,
                              enableroot=enableroot, overrides=overrides,
                              iso=False)
        common.cloudinit(**cloudinit_args)
    return {'result': 'success'}
def create(self, name, virttype=None, profile='', flavor=None, plan='kvirt', cpumodel='Westmere', cpuflags=[],
           cpupinning=[], numcpus=2, memory=512, guestid='guestrhel764', pool='default', image=None,
           disks=[{ 'size': 10 }], disksize=10, diskthin=True, diskinterface='virtio', nets=['default'], iso=None,
           vnc=False, cloudinit=True, reserveip=False, reservedns=False, reservehost=False, start=True, keys=None,
           cmds=[], ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None, tunnel=False,
           files=[], enableroot=True, alias=[], overrides={}, tags=[], storemetadata=False, sharedfolders=[],
           kernel=None, initrd=None, cmdline=None, cpuhotplug=False, memoryhotplug=False, numamode=None, numa=[],
           pcidevices=[], tpm=False, placement=[], autostart=False, rng=False, metadata={}, securitygroups=[]):
    """Create a bare metal device on this provider.

    Resolution order: optional hardware reservation (from overrides), vlan
    networks, operating system (stock image, custom ipxe via ipxe_script_url,
    or ignition-based rhcos/fcos images), plan (flavor) and facility. The
    device is then created through self.conn and, when a vlan was requested,
    one port is assigned to it once the device reports active.

    Returns {'result': 'success'} or {'result': 'failure', 'reason': ...}.
    Many parameters belong to the generic kvirt create interface and are not
    used by this backend.
    """
    # Optional hardware reservation, validated against the project's reservations.
    reservation_id = overrides.get('hardware_reservation_id')
    if reservation_id is not None:
        reservations = self.conn.list_hardware_reservations(self.project)
        if not reservations:
            return {'result': 'failure', 'reason': "No reserved hardware found"}
        elif reservation_id != 'next-available':
            matching_ids = [r.id for r in reservations
                            if r.id == reservation_id or r.short_id == reservation_id]
            if not matching_ids:
                return {'result': 'failure', 'reason': "Reserved hardware with id %s not found" % reservation_id}
            else:
                reservation_id = matching_ids[0]
    ipxe_script_url = None
    userdata = None
    networkid = None
    networkids = []
    vlan = False
    # Map requested nets to vlan ids; only the first two entries are honored.
    for index, network in enumerate(nets):
        if index > 1:
            warning("Ignoring net higher than %s" % index)
            break
        if isinstance(network, str):
            networkname = network
        elif isinstance(network, dict) and 'name' in network:
            networkname = network['name']
        # NOTE(review): an entry that is neither a str nor a dict with 'name'
        # leaves networkname bound to the previous iteration's value — confirm inputs.
        if networkname != 'default':
            networks = [n for n in self.conn.list_vlans(self.project)
                        if n.id == networkname or (n.description is not None and n.description == networkname)]
            if not networks:
                return {'result': 'failure', 'reason': "Network %s not found" % networkname}
            else:
                vlan = True
                networkid = networks[0].id
        else:
            networkid = None
        networkids.append(networkid)
    # Resolve the operating system to deploy.
    if image is not None and not common.needs_ignition(image):
        # Stock images use slugs like rhel_8; normalize the shorthand form.
        if '_' not in image and image in ['rhel8', 'rhel7', 'centos7', 'centos8']:
            image = image[:-1] + '_' + image[-1:]
            pprint("Using image %s" % image)
        found = False
        for img in self.conn.list_operating_systems():
            if img.slug == image:
                found = True
        if not found:
            msg = "image %s doesn't exist" % image
            return {'result': 'failure', 'reason': msg}
    elif image is None:
        # No image at all: boot a user-provided ipxe script.
        ipxe_script_url = overrides.get('ipxe_script_url')
        if ipxe_script_url is None:
            return {'result': 'failure', 'reason': 'You need to define ipxe_script_url as parameter'}
        image = 'custom_ipxe'
    else:
        # Ignition-based images (rhcos/fcos): boot via ipxe with an ignition url.
        ignition_url = overrides.get('ignition_url')
        if ignition_url is None:
            if self.tunnelhost is not None:
                ignition_url = "http://%s/%s.ign" % (self.tunnelhost, name)
            else:
                return {'result': 'failure', 'reason': 'You need to define ignition_url as parameter'}
        url = IMAGES[image]
        if 'rhcos' in image:
            if 'commit_id' in overrides:
                kernel, initrd, metal = common.get_commit_rhcos_metal(overrides['commit_id'])
            else:
                kernel, initrd, metal = common.get_latest_rhcos_metal(url)
        elif 'fcos' in image:
            kernel, initrd, metal = common.get_latest_fcos_metal(url)
        interface = 'eth0' if 'fcos' in image else 'ens3f0'
        userdata = self._ipxe(kernel, initrd, metal, ignition_url, interface)
        version = common.ignition_version(image)
        ignitiondir = '/tmp'
        ipv6 = []
        ignitiondata = common.ignition(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns,
                                       domain=domain, reserveip=reserveip, files=files, enableroot=enableroot,
                                       overrides=overrides, version=version, plan=plan, ipv6=ipv6, image=image)
        image = 'custom_ipxe'
        with open('%s/%s.ign' % (ignitiondir, name), 'w') as ignitionfile:
            ignitionfile.write(ignitiondata)
        if self.tunnelhost is not None:
            # Publish the ignition file on the tunnel host so the device can fetch it over http.
            pprint("Copying ignition data to %s" % self.tunnelhost)
            scpcmd = "scp -qP %s /tmp/%s.ign %s@%s:%s/%s.ign" % (self.tunnelport, name, self.tunneluser,
                                                                 self.tunnelhost, self.tunneldir, name)
            os.system(scpcmd)
    if flavor is None:
        # Pick the plan with the least memory that still satisfies the request.
        # if f[1] >= numcpus and f[2] >= memory:
        minmemory = 512000
        for f in self.conn.list_plans():
            if not f.specs:
                continue
            flavorname = f.name
            # skip this flavor until we know where it can be launched
            if flavorname == 'c3.small.x86' or (vlan and flavorname in ['t1.small.x86', 'c1.small.x86']):
                continue
            flavorcpus = int(f.specs['cpus'][0]['count'])
            flavormemory = int(f.specs['memory']['total'].replace('GB', '')) * 1024
            # NOTE(review): cpus are compared against 1 rather than numcpus — confirm intended.
            if flavorcpus >= 1 and flavormemory >= memory and flavormemory < minmemory:
                flavor = flavorname
                minmemory = flavormemory
                validfacilities = f.available_in
        if flavor is None:
            return {'result': 'failure', 'reason': 'Couldnt find flavor matching requirements'}
        pprint("Using flavor %s" % flavor)
    else:
        flavors = [f for f in self.conn.list_plans() if f.slug == flavor]
        if not flavors:
            return {'result': 'failure', 'reason': 'Flavors %s not found' % flavor}
        else:
            validfacilities = flavors[0].available_in
    features = ['tpm'] if tpm else []
    if cloudinit and userdata is None:
        userdata = common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns,
                                    domain=domain, reserveip=reserveip, files=files, enableroot=enableroot,
                                    overrides=overrides, fqdn=True, storemetadata=storemetadata)[0]
    # Restrict to facilities where the chosen plan is available.
    validfacilities = [os.path.basename(e['href']) for e in validfacilities]
    validfacilities = [f.code for f in self.conn.list_facilities() if f.id in validfacilities]
    if not validfacilities:
        return {'result': 'failure', 'reason': 'no valid facility found for flavor %s' % flavor}
    facility = overrides.get('facility')
    if facility is not None:
        matchingfacilities = [f for f in self.conn.list_facilities() if f.slug == facility]
        if not matchingfacilities:
            return {'result': 'failure', 'reason': 'Facility %s not found' % facility}
        if facility not in validfacilities:
            return {'result': 'failure',
                    'reason': 'Facility %s not allowed. You should choose between %s' % (facility,
                                                                                         ','.join(validfacilities))}
    elif self.facility is not None:
        if self.facility not in validfacilities:
            return {'result': 'failure',
                    'reason': 'Facility %s not allowed. You should choose between %s' % (self.facility,
                                                                                         ','.join(validfacilities))}
        facility = self.facility
    else:
        facility = validfacilities[0]
    # Device tags: project marker, kernel marker for ignition deploys, selected metadata.
    # NOTE(review): the incoming `tags` parameter is discarded here.
    tags = ['project_%s' % self.project]
    if userdata is not None and 'ignition' in userdata:
        tags.append("kernel_%s" % os.path.basename(kernel))
    for entry in [field for field in metadata if field in METADATA_FIELDS]:
        tags.append("%s_%s" % (entry, metadata[entry]))
    # ip_addresses = [{"address_family": 4, "public": True}, {"address_family": 6, "public": False}]
    data = {'project_id': self.project, 'hostname': name, 'plan': flavor, 'facility': facility,
            'operating_system': image, 'userdata': userdata, 'features': features, 'tags': tags}
    if ipxe_script_url is not None:
        data['ipxe_script_url'] = ipxe_script_url
    if reservation_id is not None:
        data['hardware_reservation_id'] = reservation_id
    try:
        device = self.conn.create_device(**data)
    except Exception as e:
        # NOTE(review): 'reason' carries the exception object itself, not str(e).
        return {'result': 'failure', 'reason': e}
    # Assign the first requested vlan to port 2 once the device is active.
    for networkid in networkids:
        if networkid is None:
            continue
        elif 'cluster' in overrides and name.startswith("%s-" % overrides['cluster']):
            warning("Not applying custom vlan to speed process for openshift...")
            warning("This will be applied manually later...")
            continue
        status = 'provisioning'
        while status != 'active':
            status = self.info(name).get('status')
            pprint("Waiting 5s for %s to be active..." % name)
            sleep(5)
        device_port_id = device["network_ports"][2]["id"]
        self.conn.disbond_ports(device_port_id, False)
        self.conn.assign_port(device_port_id, networkid)
        break
    return {'result': 'success'}
def create(self, name, virttype='vbox', profile='kvirt', flavor=None, plan='kvirt', cpumodel='', cpuflags=[],
           cpupinning=[], numcpus=2, memory=512, guestid='Linux_64', pool='default', image=None,
           disks=[{'size': 10}], disksize=10, diskthin=True, diskinterface='virtio', nets=['default'], iso=None,
           vnc=False, cloudinit=True, reserveip=False, reservedns=False, reservehost=False, start=True, keys=None,
           cmds=[], ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None, tunnel=False,
           files=[], enableroot=True, alias=[], overrides={}, tags=None, dnsclient=None, autostart=False,
           cpuhotplug=False, memoryhotplug=False, numamode=None, numa=[]):
    """Create a VirtualBox vm.

    Builds the machine through the VirtualBox API (self.conn): cpu/memory,
    a serial console on a free local tcp port, one nic per requested net
    (internal network, nat network, or plain nat with an ssh redirect on
    the first nic), an optional cloudinit dvd, the requested disks, and an
    optional boot iso.

    Returns {'result': 'success'} or {'result': 'failure', 'reason': ...}.
    Several parameters of the generic kvirt interface are unused here.
    """
    if self.exists(name):
        return {'result': 'failure', 'reason': "VM %s already exists" % name}
    guestid = 'Linux_64'
    # Per-disk fallbacks taken from the scalar parameters.
    default_diskinterface = diskinterface
    default_diskthin = diskthin
    default_disksize = disksize
    default_pool = pool
    default_poolpath = '/tmp'
    conn = self.conn
    vm = conn.create_machine("", name, [], guestid, "")
    vm.cpu_count = numcpus
    vm.add_storage_controller('SATA', library.StorageBus(2))
    vm.add_storage_controller('IDE', library.StorageBus(1))
    vm.memory_size = memory
    vm.description = plan
    vm.set_extra_data('profile', profile)
    creationdate = time.strftime("%d-%m-%Y %H:%M", time.gmtime())
    vm.set_extra_data('creationdate', creationdate)
    # Serial console exposed on a free local tcp port.
    serial = vm.get_serial_port(0)
    serial.server = True
    serial.enabled = True
    serial.path = str(common.get_free_port())
    serial.host_mode = library.PortMode.tcp
    nat_networks = [network.network_name for network in conn.nat_networks]
    internal_networks = [network for network in conn.internal_networks]
    for index, net in enumerate(nets):
        ip = None
        nic = vm.get_network_adapter(index)
        nic.adapter_type = library.NetworkAdapterType.virtio
        nic.enabled = True
        if isinstance(net, str):
            network = net
        elif isinstance(net, dict) and 'name' in net:
            network = net['name']
            # Static ip/mac overrides only apply to dict-style net entries.
            if ips and len(ips) > index and ips[index] is not None:
                ip = ips[index]
                vm.set_extra_data('ip', ip)
                nets[index]['ip'] = ip
            elif 'ip' in nets[index]:
                ip = nets[index]['ip']
                vm.set_extra_data('ip', ip)
            if 'mac' in nets[index]:
                nic.mac_address = nets[index]['mac'].replace(':', '')
        if network in internal_networks:
            nic.attachment_type = library.NetworkAttachmentType.internal
            nic.internal_network = network
        elif network in nat_networks:
            nic.attachment_type = library.NetworkAttachmentType.nat_network
            nic.nat_network = network
            if index == 0:
                # Forward a free host port to the guest's ssh port.
                natengine = nic.nat_engine
                nat_network = [n for n in conn.nat_networks if n.network_name == network][0]
                nat_network.add_port_forward_rule(False, 'ssh_%s' % name, library.NATProtocol.tcp, '',
                                                  common.get_free_port(), '', 22)
        else:
            nic.attachment_type = library.NetworkAttachmentType.nat
            if index == 0:
                natengine = nic.nat_engine
                natengine.add_redirect('ssh_%s' % name, library.NATProtocol.tcp, '', common.get_free_port(), '', 22)
    vm.save_settings()
    conn.register_machine(vm)
    session = Session()
    vm.lock_machine(session, library.LockType.write)
    machine = session.machine
    if iso is None and cloudinit:
        if image is not None:
            # Inject guest-specific install cmds; on rhel images they go right
            # after any subscription-manager registration commands.
            guestcmds = self.guestinstall(image)
            if not cmds:
                cmds = guestcmds
            elif 'rhel' in image:
                register = [c for c in cmds if 'subscription-manager' in c]
                if register:
                    index = cmds.index(register[-1])
                    cmds[index + 1:index + 1] = guestcmds
                else:
                    cmds = guestcmds + cmds
            else:
                cmds = guestcmds + cmds
            cmds = cmds + ['reboot']
        common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns, domain=domain,
                         reserveip=reserveip, files=files, enableroot=enableroot)
        # Wrap the generated cloudinit iso in a fixed RAW medium and attach it.
        medium = conn.create_medium('RAW', '/tmp/%s.ISO' % name, library.AccessMode.read_only,
                                    library.DeviceType.dvd)
        progress = medium.create_base_storage(368, [library.MediumVariant.fixed])
        progress.wait_for_completion()
        dvd = conn.open_medium('/tmp/%s.ISO' % name, library.DeviceType.dvd, library.AccessMode.read_only, False)
        machine.attach_device("IDE", 0, 0, library.DeviceType.dvd, dvd)
    for index, disk in enumerate(disks):
        # Normalize each disk entry (None/int/str/dict) to size/thin/interface/pool.
        if disk is None:
            disksize = default_disksize
            diskthin = default_diskthin
            diskinterface = default_diskinterface
            diskpool = default_pool
            # diskpoolpath = default_poolpath
        elif isinstance(disk, int):
            disksize = disk
            diskthin = default_diskthin
            diskinterface = default_diskinterface
            diskpool = default_pool
            # diskpoolpath = default_poolpath
        elif isinstance(disk, str) or disk.isdigit():
            # NOTE(review): `or disk.isdigit()` looks like it was meant to be
            # `and disk.isdigit()`; int(disk) raises on non-numeric strings — confirm.
            disksize = int(disk)
            diskthin = default_diskthin
            diskinterface = default_diskinterface
            diskpool = default_pool
            # diskpoolpath = default_poolpath
        elif isinstance(disk, dict):
            disksize = disk.get('size', default_disksize)
            diskthin = disk.get('thin', default_diskthin)
            diskinterface = disk.get('interface', default_diskinterface)
            diskpool = disk.get('pool', default_pool)
            # diskpoolpath = default_poolpath
        else:
            return {'result': 'failure', 'reason': "Invalid disk entry"}
        diskname = "%s_%d" % (name, index)
        if image is not None and index == 0:
            # First disk is created from the image.
            diskpath = self.create_disk(diskname, disksize, pool=diskpool, thin=diskthin, image=image)
            machine.set_extra_data('image', image)
            # return {'result': 'failure', 'reason': "Invalid image %s" % image}
        else:
            diskpath = self.create_disk(diskname, disksize, pool=diskpool, thin=diskthin, image=None)
        disk = conn.open_medium(diskpath, library.DeviceType.hard_disk, library.AccessMode.read_write, False)
        # Grow the medium to the requested size in bytes.
        disksize = disksize * 1024 * 1024 * 1024
        progress = disk.resize(disksize)
        progress.wait_for_completion()
        machine.attach_device("SATA", index, 0, library.DeviceType.hard_disk, disk)
    # Resolve the pool path so a relative iso name can be expanded.
    poolpath = default_poolpath
    for p in self._pool_info():
        poolname = p['name']
        if poolname == pool:
            poolpath = p['path']
    if iso is not None:
        if not os.path.isabs(iso):
            iso = "%s/%s" % (poolpath, iso)
        if not os.path.exists(iso):
            return {'result': 'failure', 'reason': "Invalid iso %s" % iso}
        medium = conn.create_medium('RAW', iso, library.AccessMode.read_only, library.DeviceType.dvd)
        Gb = 1 * 1024 * 1024 * 1024
        progress = medium.create_base_storage(Gb, [library.MediumVariant.fixed])
        progress.wait_for_completion()
        dvd = conn.open_medium(iso, library.DeviceType.dvd, library.AccessMode.read_only, False)
        machine.attach_device("IDE", 0, 0, library.DeviceType.dvd, dvd)
    # if nested and virttype == 'kvm':
    #     print "prout"
    # else:
    #     print "prout"
    # if reserveip:
    #     vmxml = ''
    #     macs = []
    #     for element in vmxml.getiterator('interface'):
    #         mac = element.find('mac').get('address')
    #         macs.append(mac)
    #     self._reserve_ip(name, nets, macs)
    # if reservedns:
    #     self.reserve_dns(name, nets, domain)
    machine.save_settings()
    session.unlock_machine()
    if start:
        self.start(name)
    # if reservehost:
    #     common.reserve_host(name, nets, domain)
    return {'result': 'success'}
def create(self, name, virttype='kvm', profile='', flavor=None, plan='kvirt', cpumodel='Westmere', cpuflags=[],
           numcpus=2, memory=512, guestid='guestrhel764', pool='default', template=None,
           disks=[{'size': 10}], disksize=10, diskthin=True, diskinterface='virtio', nets=['default'], iso=None,
           vnc=False, cloudinit=True, reserveip=False, reservedns=False, reservehost=False, start=True, keys=None,
           cmds=[], ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None, tunnel=False,
           files=[], enableroot=True, alias=[], overrides={}, tags=None, dnshost=None, storemetadata=False):
    """Create an aws instance.

    Resolves the AMI template, instance type, ssh keypair, userdata
    (cloudinit or ignition for coreos/rhcos templates), network interfaces
    and block device mappings, then calls EC2 run_instances.

    Returns {'result': 'success'} or {'result': 'failure', 'reason': ...}.
    Many parameters belong to the generic kvirt create interface and are
    not used by this backend.
    """
    template = self.__evaluate_template(template)
    keypair = self.keypair
    if template is not None and not template.startswith('ami-'):
        return {'result': 'failure', 'reason': 'Invalid template %s' % template}
    defaultsubnetid = None
    if flavor is None:
        # Pick the first static flavor satisfying the cpu/memory request.
        matchingflavors = [f for f in static_flavors if static_flavors[f]['cpus'] >= numcpus
                           and static_flavors[f]['memory'] >= memory]
        if matchingflavors:
            flavor = matchingflavors[0]
            common.pprint("Using instance type %s" % flavor, color='green')
        else:
            return {'result': 'failure', 'reason': 'Couldnt find instance type matching requirements'}
    conn = self.conn
    tags = [{'ResourceType': 'instance',
             'Tags': [{'Key': 'Name', 'Value': name}, {'Key': 'plan', 'Value': plan},
                      {'Key': 'hostname', 'Value': name}, {'Key': 'profile', 'Value': profile}]}]
    if keypair is None:
        keypair = 'kvirt_%s' % self.access_key_id
    keypairs = [k for k in conn.describe_key_pairs()['KeyPairs'] if k['KeyName'] == keypair]
    if not keypairs:
        # Import the first local public key found (~/.ssh then ~/.kcli).
        common.pprint("Importing your public key as %s" % keypair, color='green')
        if not os.path.exists("%s/.ssh/id_rsa.pub" % os.environ['HOME'])\
                and not os.path.exists("%s/.ssh/id_dsa.pub" % os.environ['HOME'])\
                and not os.path.exists("%s/.kcli/id_rsa.pub" % os.environ['HOME'])\
                and not os.path.exists("%s/.kcli/id_dsa.pub" % os.environ['HOME']):
            common.pprint("No public key found. Leaving", color='red')
            return {'result': 'failure', 'reason': 'No public key found'}
        elif os.path.exists("%s/.ssh/id_rsa.pub" % os.environ['HOME']):
            homekey = open("%s/.ssh/id_rsa.pub" % os.environ['HOME']).read()
        elif os.path.exists("%s/.ssh/id_dsa.pub" % os.environ['HOME']):
            homekey = open("%s/.ssh/id_dsa.pub" % os.environ['HOME']).read()
        elif os.path.exists("%s/.kcli/id_rsa.pub" % os.environ['HOME']):
            homekey = open("%s/.kcli/id_rsa.pub" % os.environ['HOME']).read()
        else:
            homekey = open("%s/.kcli/id_dsa.pub" % os.environ['HOME']).read()
        conn.import_key_pair(KeyName=keypair, PublicKeyMaterial=homekey)
    if cloudinit:
        if template is not None and (template.startswith('coreos') or template.startswith('rhcos')):
            etcd = None
            userdata = common.ignition(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns,
                                       domain=domain, reserveip=reserveip, files=files, enableroot=enableroot,
                                       overrides=overrides, etcd=etcd)
        else:
            common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns, domain=domain,
                             reserveip=reserveip, files=files, enableroot=enableroot, overrides=overrides,
                             iso=False, fqdn=True, storemetadata=storemetadata)
            # Context manager so the file handle isn't leaked (was a bare open().read()).
            with open('/tmp/user-data', 'r') as userdatafile:
                userdata = userdatafile.read()
    else:
        userdata = ''
    networkinterfaces = []
    blockdevicemappings = []
    privateips = []
    for index, net in enumerate(nets):
        # NOTE(review): 'Groups'/'SubnetId' start as literal placeholder strings;
        # SubnetId is overwritten below but Groups is not — confirm intended.
        networkinterface = {'DeleteOnTermination': True, 'Description': "eth%s" % index, 'DeviceIndex': index,
                            'Groups': ['string'], 'SubnetId': 'string'}
        ip = None
        if isinstance(net, str):
            netname = net
            netpublic = True
        elif isinstance(net, dict) and 'name' in net:
            netname = net['name']
            ip = net.get('ip')
            alias = net.get('alias')
            netpublic = net.get('public', True)
        networkinterface['AssociatePublicIpAddress'] = netpublic if index == 0 else False
        if netname == 'default':
            if defaultsubnetid is not None:
                netname = defaultsubnetid
            else:
                # Resolve the default vpc's default-for-az subnet once and cache it.
                # Filters = [{'Name': 'isDefault', 'Values': ['True']}]
                # vpcs = conn.describe_vpcs(Filters=Filters)
                vpcs = conn.describe_vpcs()
                vpcid = [vpc['VpcId'] for vpc in vpcs['Vpcs'] if vpc['IsDefault']][0]
                # Filters = [{'Name': 'vpc-id', 'Values': [vpcid]}, {'Name': 'default-for-az', 'Values': ['True']}]
                subnets = conn.describe_subnets()
                subnetid = [subnet['SubnetId'] for subnet in subnets['Subnets']
                            if subnet['DefaultForAz'] and subnet['VpcId'] == vpcid][0]
                netname = subnetid
                defaultsubnetid = netname
                common.pprint("Using subnet %s as default" % defaultsubnetid, color='green')
        if ips and len(ips) > index and ips[index] is not None:
            ip = ips[index]
            if index == 0:
                networkinterface['PrivateIpAddress'] = ip
                privateip = {'Primary': True, 'PrivateIpAddress': ip}
            else:
                privateip = {'Primary': False, 'PrivateIpAddress': ip}
            # FIX: was `privateips = privateips.append(privateip)`, which rebinds
            # the list to None (list.append returns None) and crashed the
            # len(privateips) check below on the next iteration.
            privateips.append(privateip)
        networkinterface['SubnetId'] = netname
        networkinterfaces.append(networkinterface)
    if len(privateips) > 1:
        networkinterface['PrivateIpAddresses'] = privateips
    for index, disk in enumerate(disks):
        letter = chr(index + ord('a'))
        devicename = '/dev/sd%s1' % letter if index == 0 else '/dev/sd%s' % letter
        blockdevicemapping = {'DeviceName': devicename,
                              'Ebs': {'DeleteOnTermination': True, 'VolumeType': 'standard'}}
        if isinstance(disk, int):
            disksize = disk
        elif isinstance(disk, str) and disk.isdigit():
            # FIX: was str(disk); Ebs VolumeSize must be numeric.
            disksize = int(disk)
        elif isinstance(disk, dict):
            disksize = disk.get('size', '10')
            blockdevicemapping['Ebs']['VolumeType'] = disk.get('type', 'standard')
        blockdevicemapping['Ebs']['VolumeSize'] = disksize
        blockdevicemappings.append(blockdevicemapping)
    if reservedns and domain is not None:
        tags[0]['Tags'].append({'Key': 'domain', 'Value': domain})
    if dnshost is not None:
        tags[0]['Tags'].append({'Key': 'dnshost', 'Value': dnshost})
    # NOTE(review): networkinterfaces is built but never passed to run_instances —
    # confirm whether NetworkInterfaces=networkinterfaces was intended.
    conn.run_instances(ImageId=template, MinCount=1, MaxCount=1, InstanceType=flavor, KeyName=keypair,
                       BlockDeviceMappings=blockdevicemappings, UserData=userdata, TagSpecifications=tags)
    common.pprint("%s created on aws" % name, color='green')
    if reservedns and domain is not None:
        self.reserve_dns(name, nets=nets, domain=domain, alias=alias, instanceid=name)
    return {'result': 'success'}
def create(self, name, virttype='kvm', profile='', flavor=None, plan='kvirt', cpumodel='Westmere', cpuflags=[], numcpus=2, memory=512, guestid='guestrhel764', pool='default', template=None, disks=[{ 'size': 10 }], disksize=10, diskthin=True, diskinterface='virtio', nets=['default'], iso=None, vnc=False, cloudinit=True, reserveip=False, reservedns=False, reservehost=False, start=True, keys=None, cmds=[], ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None, tunnel=False, files=[], enableroot=True, alias=[], overrides={}, tags=None, dnshost=None, storemetadata=False):
    """Create a vm on this provider.

    This backend only generates the cloudinit payload; the remaining
    parameters of the generic kvirt create interface are accepted for
    signature compatibility and otherwise ignored.

    Returns {'result': 'success'} unconditionally.
    """
    if not cloudinit:
        return {'result': 'success'}
    # iso=False: only the raw cloudinit data is produced, no iso image.
    common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway,
                     dns=dns, domain=domain, reserveip=reserveip, files=files,
                     enableroot=enableroot, overrides=overrides, iso=False)
    return {'result': 'success'}
def create(self, name, virttype='vbox', title='', description='kvirt', numcpus=2, memory=512, guestid='Linux_64',
           pool='default', template=None, disks=[{'size': 10}], disksize=10, diskthin=True, diskinterface='virtio',
           nets=['default'], iso=None, vnc=False, cloudinit=True, reserveip=False, reservedns=False, start=True,
           keys=None, cmds=None, ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None,
           tunnel=False, files=[]):
    """Create a VirtualBox vm (legacy variant).

    Builds the machine through the VirtualBox API (self.conn): cpu/memory,
    a serial console on a free local tcp port, nat nics with an ssh
    redirect on the first one, an optional cloudinit dvd and the requested
    disks, then optionally starts the vm.

    Returns {'result': 'success'} or {'result': 'failure', 'reason': ...}.

    NOTE(review): this body uses python2 print statements, and everything
    after the first `return {'result': 'success'}` below is unreachable
    dead code — kept as found pending cleanup.
    """
    guestid = 'Linux_64'
    # Per-disk fallbacks taken from the scalar parameters.
    default_diskinterface = diskinterface
    default_diskthin = diskthin
    default_disksize = disksize
    default_pool = pool
    default_poolpath = '/tmp'
    conn = self.conn
    vm = conn.create_machine("", name, [], guestid, "")
    vm.cpu_count = numcpus
    vm.add_storage_controller('SATA', library.StorageBus(2))
    vm.add_storage_controller('IDE', library.StorageBus(1))
    vm.memory_size = memory
    vm.description = description
    vm.set_extra_data('profile', title)
    # Serial console exposed on a free local tcp port.
    serial = vm.get_serial_port(0)
    serial.server = True
    serial.enabled = True
    serial.path = str(common.get_free_port())
    serial.host_mode = library.PortMode.tcp
    for index, net in enumerate(nets):
        nic = vm.get_network_adapter(index)
        nic.enabled = True
        nic.attachment_type = library.NetworkAttachmentType.nat
        if index == 0:
            # Forward a free host port to the guest's ssh port.
            natengine = nic.nat_engine
            natengine.add_redirect('ssh', library.NATProtocol.tcp, '', common.get_free_port(), '', 22)
        if isinstance(net, str):
            # nic.attachment_type = library.NetworkAttachmentType.internal
            # nic.attachment_type = library.NetworkAttachmentType.nat
            # nic.attachment_type = library.NetworkAttachmentType.nat_network
            # nic.internal_network = net
            # nic.nat_network = net
            continue
        elif isinstance(net, dict) and 'name' in net:
            # nic.internal_network = net['name']
            # nic.nat_network = net['name']
            ip = None
            if ips and len(ips) > index and ips[index] is not None:
                ip = ips[index]
                nets[index]['ip'] = ip
            elif 'ip' in nets[index]:
                ip = nets[index]['ip']
            if 'mac' in nets[index]:
                nic.mac_address = nets[index]['mac'].replace(':', '')
    vm.save_settings()
    conn.register_machine(vm)
    session = Session()
    vm.lock_machine(session, library.LockType.write)
    machine = session.machine
    if cloudinit:
        common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns, domain=domain,
                         reserveip=reserveip, files=files)
        # Wrap the generated cloudinit iso in a fixed RAW medium and attach it.
        medium = conn.create_medium('RAW', '/tmp/%s.iso' % name, library.AccessMode.read_only,
                                    library.DeviceType.dvd)
        progress = medium.create_base_storage(368, [library.MediumVariant.fixed])
        progress.wait_for_completion()
        dvd = conn.open_medium('/tmp/%s.iso' % name, library.DeviceType.dvd, library.AccessMode.read_only, False)
        machine.attach_device("IDE", 0, 0, library.DeviceType.dvd, dvd)
    for index, disk in enumerate(disks):
        # Normalize each disk entry (None/int/dict) to size/thin/interface/pool.
        if disk is None:
            disksize = default_disksize
            diskthin = default_diskthin
            diskinterface = default_diskinterface
            diskpool = default_pool
            # diskpoolpath = default_poolpath
        elif isinstance(disk, int):
            disksize = disk
            diskthin = default_diskthin
            diskinterface = default_diskinterface
            diskpool = default_pool
            # diskpoolpath = default_poolpath
        elif isinstance(disk, dict):
            disksize = disk.get('size', default_disksize)
            diskthin = disk.get('thin', default_diskthin)
            diskinterface = disk.get('interface', default_diskinterface)
            diskpool = disk.get('pool', default_pool)
            # diskpoolpath = default_poolpath
        else:
            return {'result': 'failure', 'reason': "Invalid disk entry"}
        diskname = "%s_%d" % (name, index)
        if template is not None and index == 0:
            # First disk is created from the template.
            diskpath = self.create_disk(diskname, disksize, pool=diskpool, thin=diskthin, template=template)
            machine.set_extra_data('template', template)
            # return {'result': 'failure', 'reason': "Invalid template %s" % template}
        else:
            diskpath = self.create_disk(diskname, disksize, pool=diskpool, thin=diskthin, template=None)
        disk = conn.open_medium(diskpath, library.DeviceType.hard_disk, library.AccessMode.read_write, False)
        print disksize
        # Grow the medium to the requested size in bytes.
        disksize = disksize * 1024 * 1024 * 1024
        progress = disk.resize(disksize)
        progress.wait_for_completion()
        machine.attach_device("SATA", index, 0, library.DeviceType.hard_disk, disk)
    machine.save_settings()
    session.unlock_machine()
    if start:
        self.start(name)
    return {'result': 'success'}
    # NOTE(review): everything below is unreachable (dead code after return).
    if iso is None:
        if cloudinit:
            iso = "%s/%s.iso" % (default_poolpath, name)
        else:
            iso = ''
    else:
        try:
            if os.path.isabs(iso):
                shortiso = os.path.basename(iso)
            else:
                shortiso = iso
            # iso = "%s/%s" % (default_poolpath, iso)
            # iso = "%s/%s" % (isopath, iso)
            print shortiso
        except:
            return {'result': 'failure', 'reason': "Invalid iso %s" % iso}
    # if nested and virttype == 'kvm':
    #     print "prout"
    # else:
    #     print "prout"
    # if reserveip:
    #     vmxml = ''
    #     macs = []
    #     for element in vmxml.getiterator('interface'):
    #         mac = element.find('mac').get('address')
    #         macs.append(mac)
    #     self._reserve_ip(name, nets, macs)
    # if reservedns:
    #     self._reserve_dns(name, nets, domain)
    return {'result': 'success'}
def create(self, name, virttype='kvm', profile='', plan='kvirt', flavor=None, cpumodel='Westmere', cpuflags=[],
           numcpus=2, memory=512, guestid='guestrhel764', pool='default', template=None, disks=[{'size': 10}],
           disksize=10, diskthin=True, diskinterface='virtio', nets=['default'], iso=None, vnc=False,
           cloudinit=True, reserveip=False, reservedns=False, reservehost=False, start=True, keys=None, cmds=[],
           ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None, tunnel=False, files=[],
           enableroot=True, alias=[], overrides={}, tags=None):
    """Create an OpenStack VM from a glance template.

    Orchestrates nova (server + flavors + keypairs), neutron (networks,
    floating ips, security group rules), cinder (extra volumes) and glance
    (image lookup). Many parameters (virttype, iso, vnc, tags, ...) are part
    of the provider-wide signature and are not used by this backend.

    :return: {'result': 'success'} on success, otherwise
             {'result': 'failure', 'reason': <message>}.
    """
    glance = self.glance
    nova = self.nova
    neutron = self.neutron
    # If the lookup succeeds the name is already taken; a raised exception
    # (not found) is the happy path here.
    try:
        nova.servers.find(name=name)
        common.pprint("VM %s already exists" % name, color='red')
        return {'result': 'failure', 'reason': "VM %s already exists" % name}
    except:
        pass
    # Pick a flavor: either the caller named one, or the first flavor with
    # enough ram and an exact vcpu match (falling back to m1.tiny).
    allflavors = [f for f in nova.flavors.list()]
    allflavornames = [flavor.name for flavor in allflavors]
    if flavor is None:
        flavors = [flavor for flavor in allflavors if flavor.ram >= memory and flavor.vcpus == numcpus]
        flavor = flavors[0] if flavors else nova.flavors.find(name="m1.tiny")
        common.pprint("Using flavor %s" % flavor.name, color='green')
    elif flavor not in allflavornames:
        return {'result': 'failure', 'reason': "Flavor %s not found" % flavor}
    else:
        flavor = nova.flavors.find(name=flavor)
    # Resolve each requested net (plain name or {'name': ...} dict) to a
    # neutron network id for the server's nics.
    nics = []
    for net in nets:
        if isinstance(net, str):
            netname = net
        elif isinstance(net, dict) and 'name' in net:
            netname = net['name']
        try:
            net = nova.neutron.find_network(name=netname)
        except Exception as e:
            common.pprint(e, color='red')
            return {'result': 'failure', 'reason': "Network %s not found" % netname}
        nics.append({'net-id': net.id})
    # Locate the glance image matching the requested template, if any.
    image = None
    if template is not None:
        images = [image for image in glance.images.list() if image.name == template]
        if images:
            image = images[0]
        else:
            msg = "you don't have template %s" % template
            return {'result': 'failure', 'reason': msg}
    # Build cinder volumes for the requested disks. Disk 0 is skipped when
    # thin-provisioned from a template (the server boots from the image);
    # otherwise it is cloned from the image via imageRef.
    block_dev_mapping = {}
    for index, disk in enumerate(disks):
        imageref = None
        diskname = "%s-disk%s" % (name, index)
        letter = chr(index + ord('a'))
        if isinstance(disk, int):
            disksize = disk
            diskthin = True
        elif isinstance(disk, str) and disk.isdigit():
            disksize = int(disk)
            diskthin = True
        elif isinstance(disk, dict):
            disksize = disk.get('size', '10')
            diskthin = disk.get('thin', True)
        if index == 0 and template is not None:
            if not diskthin:
                imageref = image.id
            else:
                continue
        newvol = self.cinder.volumes.create(name=diskname, size=disksize, imageRef=imageref)
        block_dev_mapping['vd%s' % letter] = newvol.id
    # Ensure a usable keypair: import the local ssh public key as 'kvirt'
    # when missing, otherwise reuse the first existing keypair.
    key_name = 'kvirt'
    keypairs = [k.name for k in nova.keypairs.list()]
    if key_name not in keypairs:
        homekey = None
        if not os.path.exists("%s/.ssh/id_rsa.pub" % os.environ['HOME'])\
                and not os.path.exists("%s/.ssh/id_dsa.pub" % os.environ['HOME']):
            print("neither id_rsa.pub or id_dsa public keys found in your .ssh directory, you might have trouble "
                  "accessing the vm")
        else:
            if os.path.exists("%s/.ssh/id_rsa.pub" % os.environ['HOME']):
                homekey = open("%s/.ssh/id_rsa.pub" % os.environ['HOME']).read()
            else:
                homekey = open("%s/.ssh/id_dsa.pub" % os.environ['HOME']).read()
            nova.keypairs.create(key_name, homekey)
    elif keypairs:
        key_name = keypairs[0]
        if key_name != 'kvirt':
            common.pprint('Using keypair %s' % key_name, color='green')
    else:
        # NOTE(review): this branch looks unreachable — when the first test is
        # false, key_name is in keypairs, so keypairs is non-empty. Confirm.
        common.pprint('Couldnt locate or create keypair for use. Leaving...', color='red')
        return {'result': 'failure', 'reason': "No usable keypair found"}
    meta = {'plan': plan, 'profile': profile}
    # Build userdata: ignition for coreos/rhcos templates, cloud-init
    # (written to /tmp/user-data by common.cloudinit) for everything else.
    userdata = None
    if cloudinit:
        if template is not None and (template.startswith('coreos') or template.startswith('rhcos')):
            etcd = None
            userdata = common.ignition(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns,
                                       domain=domain, reserveip=reserveip, files=files, enableroot=enableroot,
                                       overrides=overrides, etcd=etcd)
        else:
            common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns, domain=domain,
                             reserveip=reserveip, files=files, enableroot=enableroot, overrides=overrides, iso=False)
            userdata = open('/tmp/user-data', 'r').read().strip()
    instance = nova.servers.create(name=name, image=image, flavor=flavor, key_name=key_name, nics=nics, meta=meta,
                                   userdata=userdata, block_device_mapping=block_dev_mapping)
    tenant_id = instance.tenant_id
    # Reuse an unattached floating ip, or allocate a new one on the first
    # external network.
    floating_ips = [f['id'] for f in neutron.list_floatingips()['floatingips'] if f['port_id'] is None]
    if not floating_ips:
        network_id = None
        networks = [n for n in neutron.list_networks()['networks'] if n['router:external']]
        if networks:
            network_id = networks[0]['id']
        if network_id is not None and tenant_id is not None:
            args = dict(floating_network_id=network_id, tenant_id=tenant_id)
            floating_ip = neutron.create_floatingip(body={'floatingip': args})
            floatingip_id = floating_ip['floatingip']['id']
            floatingip_ip = floating_ip['floatingip']['floating_ip_address']
            common.pprint('Assigning new floating ip %s for this vm' % floatingip_ip, color='green')
    else:
        floatingip_id = floating_ips[0]
    # Poll (up to ~80s) for the server to report a fixed ip.
    fixed_ip = None
    timeout = 0
    while fixed_ip is None:
        common.pprint("Waiting 5 seconds for vm to get an ip", color='green')
        sleep(5)
        timeout += 5
        if timeout >= 80:
            common.pprint("Time out waiting for vm to get an ip", color='red')
            break
        vm = nova.servers.get(instance.id)
        for key in list(vm.addresses):
            entry1 = vm.addresses[key]
            for entry2 in entry1:
                if entry2['OS-EXT-IPS:type'] == 'fixed':
                    fixed_ip = entry2['addr']
                    break
    # Bind the floating ip to the port owning the fixed ip.
    # NOTE(review): floatingip_id may be unbound here if no external network
    # was found above — confirm against callers/config.
    if fixed_ip is not None:
        fixedports = [i['id'] for i in neutron.list_ports()['ports']
                      if i['fixed_ips'] and i['fixed_ips'][0]['ip_address'] == fixed_ip]
        port_id = fixedports[0]
        neutron.update_floatingip(floatingip_id, {'floatingip': {'port_id': port_id}})
    # Best-effort: open ssh (22/tcp) and icmp on the tenant's default
    # security group; failures (e.g. rules already present) are ignored.
    securitygroups = [s for s in neutron.list_security_groups()['security_groups']
                      if s['name'] == 'default' and s['tenant_id'] == tenant_id]
    if securitygroups:
        securitygroup = securitygroups[0]
        securitygroupid = securitygroup['id']
        sshrule = {'security_group_rule': {'direction': 'ingress', 'security_group_id': securitygroupid,
                                           'port_range_min': '22', 'port_range_max': '22', 'protocol': 'tcp',
                                           'remote_group_id': None, 'remote_ip_prefix': '0.0.0.0/0'}}
        icmprule = {'security_group_rule': {'direction': 'ingress', 'security_group_id': securitygroupid,
                                            'protocol': 'icmp', 'remote_group_id': None,
                                            'remote_ip_prefix': '0.0.0.0/0'}}
        try:
            neutron.create_security_group_rule(sshrule)
            neutron.create_security_group_rule(icmprule)
        except:
            pass
    return {'result': 'success'}
def create(self, name, virttype=None, profile='', plan='kvirt', flavor=None, cpumodel='Westmere', cpuflags=[],
           cpupinning=[], numcpus=2, memory=512, guestid='guestrhel764', pool='default', image=None,
           disks=[{'size': 10}], disksize=10, diskthin=True, diskinterface='virtio', nets=['default'], iso=None,
           vnc=False, cloudinit=True, reserveip=False, reservedns=False, reservehost=False, start=True, keys=None,
           cmds=[], ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None, tunnel=False,
           files=[], enableroot=True, alias=[], overrides={}, tags=[], storemetadata=False, sharedfolders=[],
           kernel=None, initrd=None, cmdline=None, placement=[], autostart=False, cpuhotplug=False,
           memoryhotplug=False, numamode=None, numa=[], pcidevices=[], tpm=False, rng=False, metadata={},
           securitygroups=[]):
    """Create an OpenStack VM from a glance image.

    Resolves flavor, networks, volumes, keypair and userdata, boots the
    server through nova, then (when no nic is on an external network)
    attaches a floating ip and opens ssh/icmp on the default security group.
    Many parameters belong to the provider-wide signature and are unused here.

    :return: {'result': 'success'} on success, otherwise
             {'result': 'failure', 'reason': <message>}.
    """
    glance = self.glance
    nova = self.nova
    neutron = self.neutron
    # A successful lookup means the name is taken; not-found raises, which
    # is the success path for creation.
    try:
        nova.servers.find(name=name)
        return {'result': 'failure', 'reason': "VM %s already exists" % name}
    except:
        pass
    # Pick a flavor: explicit name, or the first with enough ram and vcpus
    # (falling back to m1.tiny).
    allflavors = [f for f in nova.flavors.list()]
    allflavornames = [flavor.name for flavor in allflavors]
    if flavor is None:
        flavors = [flavor for flavor in allflavors if flavor.ram >= memory and flavor.vcpus >= numcpus]
        flavor = flavors[0] if flavors else nova.flavors.find(name="m1.tiny")
        pprint("Using flavor %s" % flavor.name)
    elif flavor not in allflavornames:
        return {'result': 'failure', 'reason': "Flavor %s not found" % flavor}
    else:
        flavor = nova.flavors.find(name=flavor)
    # Resolve nets to neutron ids; if any nic already sits on an external
    # network, no floating ip will be needed later.
    nics = []
    need_floating = True
    for net in nets:
        if isinstance(net, str):
            netname = net
        elif isinstance(net, dict) and 'name' in net:
            netname = net['name']
        try:
            net = nova.neutron.find_network(name=netname)
            if net.to_dict()['router:external']:
                need_floating = False
        except Exception as e:
            error(e)
            return {'result': 'failure', 'reason': "Network %s not found" % netname}
        nics.append({'net-id': net.id})
    # Locate the glance image, when one was requested.
    if image is not None:
        glanceimages = [img for img in glance.images.list() if img.name == image]
        if glanceimages:
            glanceimage = glanceimages[0]
        else:
            msg = "you don't have image %s" % image
            return {'result': 'failure', 'reason': msg}
    # Build cinder volumes. Disk 0 is skipped when thin-provisioned from an
    # image (boot from image); otherwise it is created from the image.
    block_dev_mapping = {}
    for index, disk in enumerate(disks):
        imageref = None
        diskname = "%s-disk%s" % (name, index)
        letter = chr(index + ord('a'))
        if isinstance(disk, int):
            disksize = disk
            diskthin = True
        elif isinstance(disk, str) and disk.isdigit():
            disksize = int(disk)
            diskthin = True
        elif isinstance(disk, dict):
            disksize = disk.get('size', '10')
            diskthin = disk.get('thin', True)
        if index == 0 and image is not None:
            if not diskthin:
                imageref = glanceimage.id
            else:
                continue
        newvol = self.cinder.volumes.create(name=diskname, size=disksize, imageRef=imageref)
        block_dev_mapping['vd%s' % letter] = newvol.id
    # Ensure a usable keypair: import the local ssh public key as 'kvirt'
    # when missing, otherwise reuse the first existing keypair.
    key_name = 'kvirt'
    keypairs = [k.name for k in nova.keypairs.list()]
    if key_name not in keypairs:
        homekey = None
        if not os.path.exists("%s/.ssh/id_rsa.pub" % os.environ['HOME'])\
                and not os.path.exists("%s/.ssh/id_dsa.pub" % os.environ['HOME']):
            print("neither id_rsa.pub or id_dsa public keys found in your .ssh directory, you might have trouble "
                  "accessing the vm")
        else:
            if os.path.exists("%s/.ssh/id_rsa.pub" % os.environ['HOME']):
                homekey = open("%s/.ssh/id_rsa.pub" % os.environ['HOME']).read()
            else:
                homekey = open("%s/.ssh/id_dsa.pub" % os.environ['HOME']).read()
            nova.keypairs.create(key_name, homekey)
    elif keypairs:
        key_name = keypairs[0]
        if key_name != 'kvirt':
            pprint('Using keypair %s' % key_name)
    else:
        # NOTE(review): this branch looks unreachable — when the first test is
        # false, key_name is in keypairs, so keypairs is non-empty. Confirm.
        error("Couldn't locate or create keypair for use. Leaving...")
        return {'result': 'failure', 'reason': "No usable keypair found"}
    # Userdata: ignition for images that need it, cloud-init otherwise.
    userdata = None
    if cloudinit:
        if image is not None and common.needs_ignition(image):
            version = common.ignition_version(image)
            userdata = common.ignition(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns,
                                       domain=domain, reserveip=reserveip, files=files, enableroot=enableroot,
                                       overrides=overrides, version=version, plan=plan, image=image)
        else:
            userdata = common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns,
                                        domain=domain, reserveip=reserveip, files=files, enableroot=enableroot,
                                        overrides=overrides, storemetadata=storemetadata)[0]
    # Only whitelisted metadata fields become server metadata.
    meta = {x: metadata[x] for x in metadata if x in METADATA_FIELDS}
    # NOTE(review): glanceimage is only bound when image is not None — the
    # image=None path would raise NameError here. Confirm callers always
    # pass an image.
    instance = nova.servers.create(name=name, image=glanceimage, flavor=flavor, key_name=key_name, nics=nics,
                                   meta=meta, userdata=userdata, block_device_mapping=block_dev_mapping,
                                   security_groups=securitygroups)
    tenant_id = instance.tenant_id
    if need_floating:
        # Reuse an unattached floating ip, or allocate one on the configured
        # (or first) external network.
        floating_ips = [f['id'] for f in neutron.list_floatingips()['floatingips'] if f['port_id'] is None]
        if not floating_ips:
            network_id = None
            if self.external_network is not None:
                networks = [n for n in neutron.list_networks()['networks'] if n['router:external']
                            if n['name'] == self.external_network]
            else:
                networks = [n for n in neutron.list_networks()['networks'] if n['router:external']]
            if networks:
                network_id = networks[0]['id']
            if network_id is not None and tenant_id is not None:
                args = dict(floating_network_id=network_id, tenant_id=tenant_id)
                floating_ip = neutron.create_floatingip(body={'floatingip': args})
                floatingip_id = floating_ip['floatingip']['id']
                floatingip_ip = floating_ip['floatingip']['floating_ip_address']
                pprint('Assigning new floating ip %s for this vm' % floatingip_ip)
        else:
            floatingip_id = floating_ips[0]
        # Poll (up to ~240s) for the server to report a fixed ip, bailing out
        # early if the server lands in error state.
        fixed_ip = None
        timeout = 0
        while fixed_ip is None:
            pprint("Waiting 5 seconds for vm to get an ip")
            sleep(5)
            timeout += 5
            if timeout >= 240:
                error("Time out waiting for vm to get an ip")
                break
            vm = nova.servers.get(instance.id)
            if vm.status.lower() == 'error':
                msg = "Vm reports error status"
                return {'result': 'failure', 'reason': msg}
            for key in list(vm.addresses):
                entry1 = vm.addresses[key]
                for entry2 in entry1:
                    if entry2['OS-EXT-IPS:type'] == 'fixed':
                        fixed_ip = entry2['addr']
                        break
        # Bind the floating ip to the port owning the fixed ip.
        # NOTE(review): floatingip_id may be unbound if no external network
        # was found above — confirm.
        if fixed_ip is not None:
            fixedports = [i['id'] for i in neutron.list_ports()['ports']
                          if i['fixed_ips'] and i['fixed_ips'][0]['ip_address'] == fixed_ip]
            port_id = fixedports[0]
            neutron.update_floatingip(floatingip_id, {'floatingip': {'port_id': port_id}})
    # When the caller supplied no security groups, best-effort open ssh
    # (22/tcp) and icmp on the tenant's default group; failures (e.g. rules
    # already present) are ignored.
    if not securitygroups:
        default_securitygroups = [s for s in neutron.list_security_groups()['security_groups']
                                  if s['name'] == 'default' and s['tenant_id'] == tenant_id]
        if default_securitygroups:
            securitygroup = default_securitygroups[0]
            securitygroupid = securitygroup['id']
            sshrule = {'security_group_rule': {'direction': 'ingress', 'security_group_id': securitygroupid,
                                               'port_range_min': '22', 'port_range_max': '22', 'protocol': 'tcp',
                                               'remote_group_id': None, 'remote_ip_prefix': '0.0.0.0/0'}}
            icmprule = {'security_group_rule': {'direction': 'ingress', 'security_group_id': securitygroupid,
                                                'protocol': 'icmp', 'remote_group_id': None,
                                                'remote_ip_prefix': '0.0.0.0/0'}}
            try:
                neutron.create_security_group_rule(sshrule)
                neutron.create_security_group_rule(icmprule)
            except:
                pass
    return {'result': 'success'}
def create(self, name, virttype='kvm', profile='', plan='kvirt', cpumodel='Westmere', cpuflags=[], numcpus=2,
           memory=512, guestid='guestrhel764', pool=None, template=None, disks=[{'size': 10}], disksize=10,
           diskthin=True, diskinterface='virtio', nets=['default'], iso=None, vnc=False, cloudinit=True,
           reserveip=False, reservedns=False, reservehost=False, start=True, keys=None, cmds=[], ips=None,
           netmasks=None, gateway=None, nested=True, dns=None, domain=None, tunnel=False, files=[],
           enableroot=True, alias=[], overrides={}):
    """Create a KubeVirt VirtualMachine.

    Builds the VirtualMachine custom resource plus one PersistentVolumeClaim
    per non-registry, non-preexisting disk, optionally attaches a cloud-init
    cdrom, creates the PVCs (cloning or copying the template into disk 0),
    then posts the custom object. Several parameters belong to the
    provider-wide signature and are unused by this backend.

    :return: {'result': 'success'} on success, otherwise
             {'result': 'failure', 'reason': <message>}.
    """
    if self.exists(name):
        return {'result': 'failure', 'reason': "VM %s already exists" % name}
    # A template must either be an uploaded volume or a registry disk.
    if template is not None and template not in self.volumes() and template not in REGISTRYDISKS:
        return {'result': 'failure', 'reason': "you don't have template %s" % template}
    default_disksize = disksize
    default_pool = pool
    crds = self.crds
    core = self.core
    namespace = self.namespace
    # Map template name -> PVC name for every PVC annotated with
    # kcli/template (used below for the clone-request annotation).
    allpvc = core.list_namespaced_persistent_volume_claim(namespace)
    templates = {p.metadata.annotations['kcli/template']: p.metadata.name for p in allpvc.items
                 if p.metadata.annotations is not None and 'kcli/template' in p.metadata.annotations}
    # Skeleton of the VirtualMachine custom resource; disks/volumes are
    # appended while iterating the requested disks.
    vm = {'kind': 'VirtualMachine',
          'spec': {'terminationGracePeriodSeconds': 0,
                   'domain': {'resources': {'requests': {'memory': "%sM" % memory}},
                              'devices': {'disks': []}},
                   'volumes': []},
          'apiVersion': 'kubevirt.io/v1alpha1',
          'metadata': {'namespace': namespace, 'name': name,
                       'annotations': {'kcli/plan': plan, 'kcli/profile': profile, 'kcli/template': template}}}
    pvcs = []
    # sizes is filled but not consumed in this method — presumably kept for
    # symmetry with other backends; TODO confirm.
    sizes = []
    for index, disk in enumerate(disks):
        existingpvc = False
        diskname = "disk%s" % index
        volname = "%s-vol%s" % (name, index)
        letter = chr(index + ord('a'))
        # Disk entries may be None (all defaults), a bare size, or a dict.
        if disk is None:
            disksize = default_disksize
            diskpool = default_pool
        elif isinstance(disk, int):
            disksize = disk
            diskpool = default_pool
        elif isinstance(disk, dict):
            disksize = disk.get('size', default_disksize)
            diskpool = disk.get('pool', default_pool)
            # A named disk refers to a PVC that already exists; no new claim
            # will be created for it.
            if 'name' in disk:
                volname = disk['name']
                existingpvc = True
        myvolume = {'volumeName': volname, 'name': volname}
        # Disk 0 is either a registry disk (container image) or a PVC claim;
        # all other disks are PVC claims.
        if template is not None and index == 0:
            if template in REGISTRYDISKS:
                myvolume['registryDisk'] = {'image': template}
            else:
                myvolume['persistentVolumeClaim'] = {'claimName': volname}
        if index > 0 or template is None:
            myvolume['persistentVolumeClaim'] = {'claimName': volname}
        newdisk = {'volumeName': volname, 'disk': {'dev': 'vd%s' % letter}, 'name': diskname}
        vm['spec']['domain']['devices']['disks'].append(newdisk)
        vm['spec']['volumes'].append(myvolume)
        # Registry disks and preexisting PVCs need no claim of their own.
        if index == 0 and template in REGISTRYDISKS:
            continue
        if existingpvc:
            continue
        diskpool = self.check_pool(pool)
        pvc = {'kind': 'PersistentVolumeClaim',
               'spec': {'storageClassName': diskpool,
                        'accessModes': ['ReadWriteOnce'],
                        'resources': {'requests': {'storage': '%sGi' % disksize}}},
               'apiVersion': 'v1',
               'metadata': {'name': volname}}
        # Ask the cluster to clone the template's PVC into this new claim.
        if template is not None and index == 0 and template not in REGISTRYDISKS and self.usecloning:
            pvc['metadata']['annotations'] = {'k8s.io/CloneRequest': templates[template]}
        pvcs.append(pvc)
        sizes.append(disksize)
    # Attach cloud-init userdata as a base64-encoded cdrom volume.
    if cloudinit:
        common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns, domain=domain,
                         reserveip=reserveip, files=files, enableroot=enableroot, overrides=overrides, iso=False)
        cloudinitdata = open('/tmp/user-data', 'r').read()
        cloudinitdisk = {'volumeName': 'cloudinitvolume', 'cdrom': {'readOnly': True}, 'name': 'cloudinitdisk'}
        vm['spec']['domain']['devices']['disks'].append(cloudinitdisk)
        cloudinitencoded = base64.b64encode(cloudinitdata)
        cloudinitvolume = {'cloudInitNoCloud': {'userDataBase64': cloudinitencoded}, 'name': 'cloudinitvolume'}
        vm['spec']['volumes'].append(cloudinitvolume)
    if self.debug:
        pretty_print(vm)
    # try:
    # Create the claims. For disk 0 backed by a template we either rely on
    # the clone annotation or copy the template image into the new volume;
    # other claims get bound and prepared (resized/formatted) explicitly.
    for pvc in pvcs:
        pvcname = pvc['metadata']['name']
        pvcsize = pvc['spec']['resources']['requests']['storage'].replace('Gi', '')
        # NOTE(review): `index` here is left over from the disks loop above
        # (its final value), not the position of this pvc — this condition
        # likely misbehaves with multiple disks; confirm intent.
        if template not in REGISTRYDISKS and index == 0:
            if self.usecloning:
                # NOTE: we should also check that cloning finished in this case
                core.create_namespaced_persistent_volume_claim(namespace, pvc)
                bound = self.pvc_bound(pvcname, namespace)
                if not bound:
                    return {'result': 'failure',
                            'reason': 'timeout waiting for pvc %s to get bound' % pvcname}
                continue
            else:
                volname = "%s-vol0" % (name)
                copy = self.copy_image(diskpool, template, volname)
                if copy['result'] == 'failure':
                    reason = copy['reason']
                    return {'result': 'failure', 'reason': reason}
                continue
        core.create_namespaced_persistent_volume_claim(namespace, pvc)
        bound = self.pvc_bound(pvcname, namespace)
        if not bound:
            return {'result': 'failure',
                    'reason': 'timeout waiting for pvc %s to get bound' % pvcname}
        prepare = self.prepare_pvc(pvcname, size=pvcsize)
        if prepare['result'] == 'failure':
            reason = prepare['reason']
            return {'result': 'failure', 'reason': reason}
    crds.create_namespaced_custom_object(DOMAIN, VERSION, namespace, 'virtualmachines', vm)
    # except Exception as err:
    #     return {'result': 'failure', 'reason': err}
    return {'result': 'success'}
def create(self, name, virttype=None, profile='', flavor=None, plan='kvirt', cpumodel='Westmere', cpuflags=[],
           cpupinning=[], numcpus=2, memory=512, guestid='guestrhel764', pool='default', image=None,
           disks=[{'size': 10}], disksize=10, diskthin=True, diskinterface='virtio', nets=['default'], iso=None,
           vnc=False, cloudinit=True, reserveip=False, reservedns=False, reservehost=False, start=True, keys=None,
           cmds=[], ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None, tunnel=False,
           files=[], enableroot=True, alias=[], overrides={}, tags=[], storemetadata=False, sharedfolders=[],
           kernel=None, initrd=None, cmdline=None, placement=[], autostart=False, cpuhotplug=False,
           memoryhotplug=False, numamode=None, numa=[], pcidevices=[], tpm=False, rng=False, metadata={},
           securitygroups=[]):
    """Create an AWS EC2 instance.

    Resolves the ami, instance type, keypair, userdata, network interfaces
    and block device mappings, then calls run_instances. Many parameters
    belong to the provider-wide signature and are unused by this backend.

    Fixes over previous revision:
    - ``privateips.append(...)`` no longer rebinds the list to the ``None``
      returned by ``list.append`` (which made the later ``len(privateips)``
      raise ``TypeError``).
    - a digit-string disk size is converted with ``int`` instead of being
      re-wrapped with ``str``.
    - the public key file is read through a context manager so the handle
      is closed.

    :return: {'result': 'success'} on success, otherwise
             {'result': 'failure', 'reason': <message>}.
    """
    conn = self.conn
    if self.exists(name):
        return {'result': 'failure', 'reason': "VM %s already exists" % name}
    image = self.__evaluate_image(image)
    keypair = self.keypair
    # Resolve the image name to an ami id; an image is mandatory on aws.
    if image is not None:
        Filters = [{'Name': 'name', 'Values': [image]}]
        images = conn.describe_images(Filters=Filters)
        if not image.startswith('ami-') and 'Images' in images and images['Images']:
            imageinfo = images['Images'][0]
            imageid = imageinfo['ImageId']
            pprint("Using ami %s" % imageid)
            image = imageinfo['Name']
        else:
            return {'result': 'failure', 'reason': 'Invalid image %s' % image}
    else:
        return {'result': 'failure', 'reason': 'An image (or amid) is required'}
    defaultsubnetid = None
    # Pick the first static instance type satisfying cpu/memory requests.
    if flavor is None:
        matching = [f for f in staticf if staticf[f]['cpus'] >= numcpus and staticf[f]['memory'] >= memory]
        if matching:
            flavor = matching[0]
            pprint("Using instance type %s" % flavor)
        else:
            return {'result': 'failure', 'reason': 'Couldnt find instance type matching requirements'}
    # Tag the instance with its name plus any whitelisted metadata fields.
    vmtags = [{'ResourceType': 'instance',
               'Tags': [{'Key': 'Name', 'Value': name}, {'Key': 'hostname', 'Value': name}]}]
    for entry in [field for field in metadata if field in METADATA_FIELDS]:
        vmtags[0]['Tags'].append({'Key': entry, 'Value': metadata[entry]})
    # Import a local public key as the keypair when it does not exist yet,
    # scanning ~/.ssh then ~/.kcli, rsa before dsa.
    if keypair is None:
        keypair = 'kvirt_%s' % self.access_key_id
    keypairs = [k for k in conn.describe_key_pairs()['KeyPairs'] if k['KeyName'] == keypair]
    if not keypairs:
        pprint("Importing your public key as %s" % keypair)
        home = os.environ['HOME']
        candidates = ["%s/.ssh/id_rsa.pub" % home, "%s/.ssh/id_dsa.pub" % home,
                      "%s/.kcli/id_rsa.pub" % home, "%s/.kcli/id_dsa.pub" % home]
        publickeyfile = next((p for p in candidates if os.path.exists(p)), None)
        if publickeyfile is None:
            error("No public key found. Leaving")
            return {'result': 'failure', 'reason': 'No public key found'}
        with open(publickeyfile) as f:
            homekey = f.read()
        conn.import_key_pair(KeyName=keypair, PublicKeyMaterial=homekey)
    # Userdata: ignition for images that need it, cloud-init otherwise.
    if cloudinit:
        if image is not None and common.needs_ignition(image):
            version = common.ignition_version(image)
            userdata = common.ignition(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns,
                                       domain=domain, reserveip=reserveip, files=files, enableroot=enableroot,
                                       overrides=overrides, version=version, plan=plan, image=image)
        else:
            userdata = common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns,
                                        domain=domain, reserveip=reserveip, files=files, enableroot=enableroot,
                                        overrides=overrides, fqdn=True, storemetadata=storemetadata)[0]
    else:
        userdata = ''
    networkinterfaces = []
    blockdevicemappings = []
    privateips = []
    # Build one network interface per requested net; 'default' resolves to
    # the default subnet of the default vpc (resolved once, then cached).
    for index, net in enumerate(nets):
        networkinterface = {'DeleteOnTermination': True, 'Description': "eth%s" % index, 'DeviceIndex': index,
                            'Groups': ['string'], 'SubnetId': 'string'}
        ip = None
        if isinstance(net, str):
            netname = net
            netpublic = True
        elif isinstance(net, dict) and 'name' in net:
            netname = net['name']
            ip = net.get('ip')
            alias = net.get('alias')
            netpublic = net.get('public', True)
        # Only the first nic may get a public ip.
        networkinterface['AssociatePublicIpAddress'] = netpublic if index == 0 else False
        if netname == 'default':
            if defaultsubnetid is not None:
                netname = defaultsubnetid
            else:
                vpcs = conn.describe_vpcs()
                vpcid = [vpc['VpcId'] for vpc in vpcs['Vpcs'] if vpc['IsDefault']][0]
                subnets = conn.describe_subnets()
                subnetid = [subnet['SubnetId'] for subnet in subnets['Subnets']
                            if subnet['DefaultForAz'] and subnet['VpcId'] == vpcid][0]
                netname = subnetid
                defaultsubnetid = netname
                pprint("Using subnet %s as default" % defaultsubnetid)
        if ips and len(ips) > index and ips[index] is not None:
            ip = ips[index]
            if index == 0:
                networkinterface['PrivateIpAddress'] = ip
                privateip = {'Primary': True, 'PrivateIpAddress': ip}
            else:
                privateip = {'Primary': False, 'PrivateIpAddress': ip}
            # BUGFIX: list.append returns None; the old assignment clobbered
            # privateips and broke the len() check below.
            privateips.append(privateip)
        networkinterface['SubnetId'] = netname
        networkinterfaces.append(networkinterface)
    # NOTE(review): this attaches all private ips to the LAST interface
    # built above (loop variable leak) — confirm that is the intent.
    if len(privateips) > 1:
        networkinterface['PrivateIpAddresses'] = privateips
    # Build EBS mappings for extra disks (disk 0 comes from the ami).
    for index, disk in enumerate(disks):
        if image is not None and index == 0:
            continue
        letter = chr(index + ord('a'))
        # devicename = '/dev/sd%s1' % letter if index == 0 else '/dev/sd%s' % letter
        devicename = '/dev/xvd%s' % letter
        blockdevicemapping = {'DeviceName': devicename,
                              'Ebs': {'DeleteOnTermination': True, 'VolumeType': 'standard'}}
        if isinstance(disk, int):
            disksize = disk
        elif isinstance(disk, str) and disk.isdigit():
            # BUGFIX: was str(disk); convert digit strings to a number as the
            # sibling providers do.
            disksize = int(disk)
        elif isinstance(disk, dict):
            disksize = disk.get('size', '10')
            blockdevicemapping['Ebs']['VolumeType'] = disk.get('type', 'standard')
        blockdevicemapping['Ebs']['VolumeSize'] = disksize
        blockdevicemappings.append(blockdevicemapping)
    # Resolve requested security group names to ids.
    # NOTE(review): vpcid is only bound when a 'default' net was resolved
    # above — confirm get_security_group_id tolerates that.
    SecurityGroupIds = []
    for sg in securitygroups:
        sgid = self.get_security_group_id(sg, vpcid)
        if sgid is not None:
            SecurityGroupIds.append(sgid)
    conn.run_instances(ImageId=imageid, MinCount=1, MaxCount=1, InstanceType=flavor, KeyName=keypair,
                       BlockDeviceMappings=blockdevicemappings, UserData=userdata, TagSpecifications=vmtags,
                       SecurityGroupIds=SecurityGroupIds)
    if reservedns and domain is not None:
        # eip = conn.allocate_address(Domain='vpc')
        # vmid = reservation.instances[0].id
        # conn.associate_address(InstanceId=vmid, AllocationId=eip["AllocationId"])
        # self.reserve_dns(name, nets=nets, domain=domain, alias=alias, instanceid=name, ip=eip["PublicIp"])
        self.reserve_dns(name, nets=nets, domain=domain, alias=alias, instanceid=name)
    return {'result': 'success'}
def create(self, name, virttype='kvm', profile='', plan='kvirt', cpumodel='Westmere', cpuflags=[], numcpus=2,
           memory=512, guestid='guestrhel764', pool='default', template=None, disks=[{'size': 10}], disksize=10,
           diskthin=True, diskinterface='virtio', nets=['default'], iso=None, vnc=False, cloudinit=True,
           reserveip=False, reservedns=False, reservehost=False, start=True, keys=None, cmds=[], ips=None,
           netmasks=None, gateway=None, nested=True, dns=None, domain=None, tunnel=False, files=[],
           enableroot=True, alias=[], overrides={}, tags=None):
    """Create an AWS EC2 instance from an ami template.

    Resolves the instance type, keypair, userdata, network interfaces and
    block device mappings, then calls run_instances and optionally reserves
    dns. Many parameters belong to the provider-wide signature and are
    unused by this backend.

    Fixes over previous revision:
    - ``privateips.append(...)`` no longer rebinds the list to the ``None``
      returned by ``list.append`` (which made the later ``len(privateips)``
      raise ``TypeError``).
    - public key and userdata files are read through context managers so
      the handles are closed.
    - removed the superseded commented-out run_instances/ClientError block.

    :return: {'result': 'success', 'name': <instance id>} on success,
             otherwise {'result': 'failure', 'reason': <message>}.
    """
    template = self.__evaluate_template(template)
    defaultsubnetid = None
    # Pick the first static instance type satisfying cpu/memory requests.
    matchingflavors = [f for f in flavors if flavors[f]['cpus'] >= numcpus and flavors[f]['memory'] >= memory]
    if matchingflavors:
        flavor = matchingflavors[0]
        common.pprint("Using instance type %s" % flavor, color='green')
    else:
        return {'result': 'failure', 'reason': 'Couldnt find instance type matching requirements'}
    conn = self.conn
    tags = [{'ResourceType': 'instance',
             'Tags': [{'Key': 'hostname', 'Value': name}, {'Key': 'plan', 'Value': plan},
                      {'Key': 'profile', 'Value': profile}]}]
    # Import a local ssh public key as the 'kvirt' keypair when missing
    # (rsa preferred over dsa).
    keypairs = [k for k in conn.describe_key_pairs()['KeyPairs'] if k['KeyName'] == 'kvirt']
    if not keypairs:
        common.pprint("Importing your public key as kvirt keyname", color='green')
        home = os.environ['HOME']
        candidates = ["%s/.ssh/id_rsa.pub" % home, "%s/.ssh/id_dsa.pub" % home]
        publickeyfile = next((p for p in candidates if os.path.exists(p)), None)
        if publickeyfile is None:
            common.pprint("No public key found. Leaving", color='red')
            return {'result': 'failure', 'reason': 'No public key found'}
        with open(publickeyfile) as f:
            homekey = f.read()
        conn.import_key_pair(KeyName='kvirt', PublicKeyMaterial=homekey)
    # Userdata: cloud-init writes /tmp/user-data as a side effect.
    if cloudinit:
        common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns, domain=domain,
                         reserveip=reserveip, files=files, enableroot=enableroot, overrides=overrides,
                         iso=False, fqdn=True)
        with open('/tmp/user-data', 'r') as f:
            userdata = f.read()
    else:
        userdata = ''
    networkinterfaces = []
    blockdevicemappings = []
    privateips = []
    # Build one network interface per requested net; 'default' resolves to
    # the default subnet of the default vpc (resolved once, then cached).
    for index, net in enumerate(nets):
        networkinterface = {'AssociatePublicIpAddress': False, 'DeleteOnTermination': True,
                            'Description': "eth%s" % index, 'DeviceIndex': index, 'Groups': ['string'],
                            'SubnetId': 'string'}
        # Only the first nic gets a public ip.
        if index == 0:
            networkinterface['AssociatePublicIpAddress'] = True
        ip = None
        if isinstance(net, str):
            netname = net
        elif isinstance(net, dict) and 'name' in net:
            netname = net['name']
            if 'ip' in net:
                ip = net['ip']
            if 'alias' in net:
                alias = net['alias']
        if netname == 'default':
            if defaultsubnetid is not None:
                netname = defaultsubnetid
            else:
                # Filters = [{'Name': 'isDefault', 'Values': ['True']}]
                # vpcs = conn.describe_vpcs(Filters=Filters)
                vpcs = conn.describe_vpcs()
                vpcid = [vpc['VpcId'] for vpc in vpcs['Vpcs'] if vpc['IsDefault']][0]
                # Filters = [{'Name': 'vpc-id', 'Values': [vpcid]}, {'Name': 'default-for-az', 'Values': ['True']}]
                subnets = conn.describe_subnets()
                subnetid = [subnet['SubnetId'] for subnet in subnets['Subnets']
                            if subnet['DefaultForAz'] and subnet['VpcId'] == vpcid][0]
                netname = subnetid
                defaultsubnetid = netname
                common.pprint("Using subnet %s as default" % defaultsubnetid, color='green')
        if ips and len(ips) > index and ips[index] is not None:
            ip = ips[index]
            if index == 0:
                networkinterface['PrivateIpAddress'] = ip
                privateip = {'Primary': True, 'PrivateIpAddress': ip}
            else:
                privateip = {'Primary': False, 'PrivateIpAddress': ip}
            # BUGFIX: list.append returns None; the old assignment clobbered
            # privateips and broke the len() check below.
            privateips.append(privateip)
        networkinterface['SubnetId'] = netname
        networkinterfaces.append(networkinterface)
    # NOTE(review): this attaches all private ips to the LAST interface
    # built above (loop variable leak) — confirm that is the intent.
    if len(privateips) > 1:
        networkinterface['PrivateIpAddresses'] = privateips
    # Build EBS mappings for the requested disks.
    for index, disk in enumerate(disks):
        letter = chr(index + ord('a'))
        devicename = '/dev/sd%s1' % letter if index == 0 else '/dev/sd%s' % letter
        blockdevicemapping = {'DeviceName': devicename,
                              'Ebs': {'DeleteOnTermination': True, 'VolumeType': 'standard'}}
        if isinstance(disk, int):
            disksize = disk
        elif isinstance(disk, dict):
            disksize = disk.get('size', '10')
            blockdevicemapping['Ebs']['VolumeType'] = disk.get('type', 'standard')
        blockdevicemapping['Ebs']['VolumeSize'] = disksize
        blockdevicemappings.append(blockdevicemapping)
    if reservedns and domain is not None:
        tags[0]['Tags'].append({'Key': 'domain', 'Value': domain})
    instance = conn.run_instances(ImageId=template, MinCount=1, MaxCount=1, InstanceType=flavor, KeyName='kvirt',
                                  BlockDeviceMappings=blockdevicemappings, UserData=userdata,
                                  TagSpecifications=tags)
    newname = instance['Instances'][0]['InstanceId']
    common.pprint("%s created on aws" % newname, color='green')
    if reservedns and domain is not None:
        self.reserve_dns(name, nets=nets, domain=domain, alias=alias, instanceid=newname)
    return {'result': 'success', 'name': newname}
def create(self, name, virttype=None, profile='kvirt', flavor=None, plan='kvirt', cpumodel='host-model',
           cpuflags=[], cpupinning=[], numcpus=2, memory=512, guestid='centos7_64Guest', pool='default',
           image=None, disks=[{'size': 10}], disksize=10, diskthin=True, diskinterface='virtio',
           nets=['default'], iso=None, vnc=False, cloudinit=True, reserveip=False, reservedns=False,
           reservehost=False, start=True, keys=None, cmds=[], ips=None, netmasks=None, gateway=None,
           nested=True, dns=None, domain=None, tunnel=False, files=[], enableroot=True, overrides={},
           tags=[], storemetadata=False, sharedfolders=[], kernel=None, initrd=None, cmdline=None,
           placement=[], autostart=False, cpuhotplug=False, memoryhotplug=False, numamode=None, numa=[],
           pcidevices=[], tpm=False, rng=False, metadata={}, securitygroups=[]):
    """Create a vm on vSphere, either by cloning *image* or from scratch.

    The flow is: optionally clone an existing template vm (injecting ignition
    data or a cloud-init ISO), then reconfigure the resulting (or newly
    created) vm with the requested disks, nics and iso, rewire nics attached
    to distributed virtual switches, and finally power the vm on when *start*
    is True.

    Returns ``{'result': 'success'}`` on success or
    ``{'result': 'failure', 'reason': ...}`` when the image, a pool or an iso
    cannot be found. NOTE(review): the distributed-switch branch can also
    return a bare string ``"%s not found" % name`` — inconsistent with the
    dict contract used everywhere else; confirm callers tolerate this.

    NOTE(review): many parameters (virttype, flavor, cpumodel, cpuflags,
    cpupinning, guestid, ips, netmasks, tunnel, tags, sharedfolders, kernel,
    initrd, cmdline, placement, autostart, hotplug/numa/pci/tpm/rng,
    securitygroups, ...) are accepted for interface parity with other
    providers but are not used in this body. Mutable defaults ([], {}) are
    shared across calls — the body only reads them, but verify nothing
    mutates them elsewhere.
    """
    dc = self.dc
    vmFolder = dc.vmFolder
    distributed = self.distributed
    diskmode = 'persistent'
    # Remember the call-time defaults; the per-disk loop below reuses the
    # loop variables (disksize, diskthin, diskinterface), so the originals
    # must be stashed first.
    default_diskinterface = diskinterface
    default_diskthin = diskthin
    default_disksize = disksize
    default_pool = pool
    memory = int(memory)
    numcpus = int(numcpus)
    si = self.si
    dc = self.dc
    rootFolder = self.rootFolder
    # Vms belonging to a named plan get grouped under a folder of that name.
    if plan != 'kvirt':
        createfolder(si, dc.vmFolder, plan)
        vmfolder = find(si, dc.vmFolder, vim.Folder, plan)
    else:
        vmfolder = dc.vmFolder
    si = self.si
    clu = find(si, rootFolder, vim.ComputeResource, self.clu)
    resourcepool = clu.resourcePool
    if image is not None:
        # Clone path: locate the template vm and prepare a clone spec whose
        # extraConfig carries kvirt metadata (image/plan/profile).
        rootFolder = self.rootFolder
        imageobj = findvm(si, rootFolder, image)
        if imageobj is None:
            return {'result': 'failure', 'reason': "Image %s not found" % image}
        clonespec = createclonespec(resourcepool)
        confspec = vim.vm.ConfigSpec()
        confspec.annotation = name
        confspec.memoryMB = memory
        confspec.numCPUs = numcpus
        planopt = vim.option.OptionValue()
        planopt.key = 'plan'
        planopt.value = plan
        profileopt = vim.option.OptionValue()
        profileopt.key = 'profile'
        profileopt.value = profile
        imageopt = vim.option.OptionValue()
        imageopt.key = 'image'
        imageopt.value = image
        extraconfig = [imageopt, planopt, profileopt]
        clonespec.config = confspec
        # Power-on is deferred until disks/nics have been reconfigured below.
        clonespec.powerOn = False
        cloudinitiso = None
        if cloudinit:
            if image is not None and common.needs_ignition(image):
                # CoreOS-style images: pass ignition through guestinfo
                # instead of a cloud-init ISO.
                version = common.ignition_version(image)
                ignitiondata = common.ignition(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway,
                                               dns=dns, domain=domain, reserveip=reserveip, files=files,
                                               enableroot=enableroot, overrides=overrides, version=version,
                                               plan=plan, image=image)
                ignitionopt = vim.option.OptionValue()
                ignitionopt.key = 'guestinfo.ignition.config.data'
                ignitionopt.value = base64.b64encode(ignitiondata.encode()).decode()
                encodingopt = vim.option.OptionValue()
                encodingopt.key = 'guestinfo.ignition.config.data.encoding'
                encodingopt.value = 'base64'
                extraconfig.extend([ignitionopt, encodingopt])
            else:
                # customspec = makecuspec(name, nets=nets, gateway=gateway, dns=dns, domain=domain)
                # clonespec.customization = customspec
                # Datastore path where the cloud-init ISO will live once
                # uploaded (built/uploaded after the clone completes).
                cloudinitiso = "[%s]/%s/%s.ISO" % (default_pool, name, name)
                # NOTE(review): this rebinds the *metadata* parameter with
                # common.cloudinit's return value; the METADATA_FIELDS loop
                # below then iterates this new dict, not the caller's
                # argument — confirm that is intended.
                userdata, metadata, netdata = common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets,
                                                               gateway=gateway, dns=dns, domain=domain,
                                                               reserveip=reserveip, files=files,
                                                               enableroot=enableroot, overrides=overrides,
                                                               storemetadata=storemetadata)
        confspec.extraConfig = extraconfig
        t = imageobj.CloneVM_Task(folder=vmfolder, name=name, spec=clonespec)
        waitForMe(t)
        if cloudinitiso is not None:
            # Build the cloud-init ISO locally, upload it to the pool, then
            # attach it as the vm's cd drive.
            with TemporaryDirectory() as tmpdir:
                common.make_iso(name, tmpdir, userdata, metadata, netdata)
                cloudinitisofile = "%s/%s.ISO" % (tmpdir, name)
                self._uploadimage(default_pool, cloudinitisofile, name)
            vm = findvm(si, vmFolder, name)
            c = changecd(self.si, vm, cloudinitiso)
            waitForMe(c)
    # Common reconfigure spec (applies to both cloned and scratch vms):
    # name/cpu/memory plus selected metadata entries as extraConfig options.
    datastores = {}
    confspec = vim.vm.ConfigSpec()
    confspec.name = name
    confspec.annotation = name
    confspec.memoryMB = memory
    confspec.numCPUs = numcpus
    confspec.extraConfig = []
    for entry in [field for field in metadata if field in METADATA_FIELDS]:
        opt = vim.option.OptionValue()
        opt.key = entry
        opt.value = metadata[entry]
        confspec.extraConfig.append(opt)
    if nested:
        confspec.nestedHVEnabled = True
    # NOTE(review): guestId is hardcoded and ignores the *guestid* parameter
    # (default 'centos7_64Guest') — confirm whether guestid should be used.
    confspec.guestId = 'centos7_64Guest'
    vmfi = vim.vm.FileInfo()
    # "[pool]" datastore path prefix; vSphere picks the vm directory.
    filename = "[" + default_pool + "]"
    vmfi.vmPathName = filename
    confspec.files = vmfi
    if vnc:
        # NOTE(review): this *replaces* confspec.extraConfig, discarding the
        # metadata options appended above when vnc is enabled — looks like it
        # should append instead; confirm.
        vncport = random.randint(5900, 7000)
        opt1 = vim.option.OptionValue()
        opt1.key = 'RemoteDisplay.vnc.port'
        opt1.value = vncport
        opt2 = vim.option.OptionValue()
        opt2.key = 'RemoteDisplay.vnc.enabled'
        opt2.value = "TRUE"
        confspec.extraConfig = [opt1, opt2]
    if image is None:
        # Scratch path: create an empty vm; devices are added right after.
        t = vmfolder.CreateVM_Task(confspec, resourcepool)
        waitForMe(t)
    vm = find(si, dc.vmFolder, vim.VirtualMachine, name)
    currentdevices = vm.config.hardware.device
    currentdisks = [d for d in currentdevices if isinstance(d, vim.vm.device.VirtualDisk)]
    currentnics = [d for d in currentdevices if isinstance(d, vim.vm.device.VirtualEthernetCard)]
    confspec = vim.vm.ConfigSpec()
    devconfspec = []
    for index, disk in enumerate(disks):
        # Normalize the per-disk entry (None / int / numeric str / dict)
        # into disksize/diskthin/diskinterface/diskpool.
        if disk is None:
            disksize = default_disksize
            diskthin = default_diskthin
            diskinterface = default_diskinterface
            diskpool = default_pool
        elif isinstance(disk, int):
            disksize = disk
            diskthin = default_diskthin
            diskinterface = default_diskinterface
            diskpool = default_pool
        elif isinstance(disk, str) and disk.isdigit():
            disksize = int(disk)
            diskthin = default_diskthin
            diskinterface = default_diskinterface
            diskpool = default_pool
        elif isinstance(disk, dict):
            disksize = disk.get('size', default_disksize)
            diskthin = disk.get('thin', default_diskthin)
            diskinterface = disk.get('interface', default_diskinterface)
            diskpool = disk.get('pool', default_pool)
        if index < len(currentdisks) and image is not None:
            # Disk already exists on the cloned image: grow it in place if
            # the requested size is larger, never add a duplicate.
            currentdisk = currentdisks[index]
            currentsize = convert(1000 * currentdisk.capacityInKB, GB=False)
            if int(currentsize) < disksize:
                pprint("Waiting for image disk %s to be resized" % index)
                currentdisk.capacityInKB = disksize * 1048576
                # NOTE(review): the ConfigSpec assigned here is immediately
                # overwritten by the VirtualDeviceSpec on the next line —
                # dead assignment, presumably leftover.
                diskspec = vim.vm.ConfigSpec()
                diskspec = vim.vm.device.VirtualDeviceSpec(device=currentdisk, operation="edit")
                devconfspec.append(diskspec)
            continue
        # GiB -> KiB, the unit vSphere device specs expect.
        disksize = disksize * 1048576
        if diskpool not in datastores:
            datastore = find(si, rootFolder, vim.Datastore, diskpool)
            if not datastore:
                return {'result': 'failure', 'reason': "Pool %s not found" % diskpool}
            else:
                datastores[diskpool] = datastore
        if index == 0:
            # The first new disk also needs the scsi controller created.
            scsispec = createscsispec()
            devconfspec.append(scsispec)
        diskspec = creatediskspec(index, disksize, datastore, diskmode, diskthin)
        devconfspec.append(diskspec)
    # NICSPEC — only add nics beyond what the clone already carries.
    for index, net in enumerate(nets):
        if index < len(currentnics):
            continue
        nicname = 'Network Adapter %d' % (index + 1)
        if net == 'default':
            net = 'VM Network'
        nicspec = createnicspec(nicname, net)
        devconfspec.append(nicspec)
    if iso:
        # Bare iso names are resolved against the isos visible to this host.
        if '/' not in iso:
            matchingisos = [i for i in self._getisos() if i.endswith(iso)]
            if matchingisos:
                iso = matchingisos[0]
            else:
                return {'result': 'failure', 'reason': "Iso %s not found" % iso}
        cdspec = createisospec(iso)
        devconfspec.append(cdspec)
    # bootoptions = vim.option.OptionValue(key='bios.bootDeviceClasses',value='allow:hd,cd,fd,net')
    # confspec.bootOptions = vim.vm.BootOptions(bootOrder=[vim.vm.BootOptions.BootableCdromDevice()])
    confspec.deviceChange = devconfspec
    t = vm.Reconfigure(confspec)
    waitForMe(t)
    # HANDLE DVS — rewire nics whose network is a distributed-vswitch
    # portgroup: match each nic by mac address, then point its backing at the
    # right switch uuid / portgroup key.
    if distributed:
        # 2-GETMAC
        vm = findvm(si, vmfolder, name)
        if vm is None:
            # NOTE(review): bare-string return, unlike the dict results above.
            return "%s not found" % (name)
        devices = vm.config.hardware.device
        macaddr = []
        for dev in devices:
            # Duck-typed nic detection: only ethernet cards expose
            # addressType/macAddress.
            if "addressType" in dir(dev):
                macaddr.append(dev.macAddress)
        portgs = {}
        o = si.content.viewManager.CreateContainerView(rootFolder, [vim.DistributedVirtualSwitch], True)
        dvnetworks = o.view
        o.Destroy()
        for dvnetw in dvnetworks:
            uuid = dvnetw.uuid
            for portg in dvnetw.portgroup:
                portgs[portg.name] = [uuid, portg.key]
        for k in range(len(nets)):
            net = nets[k]
            # NOTE(review): assumes macaddr has one entry per requested net,
            # in the same order — IndexError if the vm has fewer nics.
            mactochange = macaddr[k]
            if net in portgs.keys():
                # NOTE(review): vim.vm.VirtualMachineSpec does not look like
                # a real pyVmomi type (vim.vm.ConfigSpec expected) — this
                # branch would raise on a DVS network; confirm against the
                # pyVmomi bindings in use.
                confspec = vim.vm.VirtualMachineSpec()
                nicspec = vim.vm.device.VirtualDeviceSpec()
                nicspec.operation = vim.ConfigSpecOperation.edit
                nic = vim.vm.device.VirtualPCNet32()
                dnicbacking = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
                dvconnection = vim.dvs.DistributedVirtualSwitchPortConnection()
                dvconnection.switchUuid = portgs[net][0]
                dvconnection.portgroupKey = portgs[net][1]
                dnicbacking.port = dvconnection
                nic.backing = dnicbacking
                nicspec.device = nic
                # 2-GETMAC — re-fetch the vm and swap in the real device
                # matching the target mac, overriding the placeholder nic.
                vm = findvm(si, vmfolder, name)
                if vm is None:
                    return "%s not found" % (name)
                devices = vm.config.hardware.device
                for dev in devices:
                    if "addressType" in dir(dev):
                        mac = dev.macAddress
                        if mac == mactochange:
                            dev.backing = dnicbacking
                            nicspec.device = dev
                devconfspec = [nicspec]
                confspec.deviceChange = devconfspec
                t = vm.reconfigVM_Task(confspec)
                waitForMe(t)
                # NOTE(review): powers on per rewired nic regardless of
                # *start*, and then the `if start:` below powers on again —
                # confirm whether the double power-on is intended.
                t = vm.PowerOnVM_Task(None)
                waitForMe(t)
    if start:
        t = vm.PowerOnVM_Task(None)
        waitForMe(t)
    return {'result': 'success'}