Example #1
    def deploy_template(self, template, *args, **kwargs):
        self.logger.debug(' Deploying RHEV template %s to VM %s' %
                          (template, kwargs["vm_name"]))
        timeout = kwargs.pop('timeout', 900)
        power_on = kwargs.pop('power_on', True)
        vm_kwargs = {
            'name': kwargs['vm_name'],
            'cluster': self.api.clusters.get(kwargs['cluster']),
            'template': self.api.templates.get(template)
        }
        if 'placement_policy_host' in kwargs and 'placement_policy_affinity' in kwargs:
            host = params.Host(name=kwargs['placement_policy_host'])
            policy = params.VmPlacementPolicy(
                host=host, affinity=kwargs['placement_policy_affinity'])
            vm_kwargs['placement_policy'] = policy
        if 'cpu' in kwargs:
            vm_kwargs['cpu'] = params.CPU(topology=params.CpuTopology(
                cores=int(kwargs['cpu'])))
        if 'ram' in kwargs:
            vm_kwargs['memory'] = int(kwargs['ram']) * 1024 * 1024  # RAM given in MB, convert to bytes
        self.api.vms.add(params.VM(**vm_kwargs))
        self.wait_vm_stopped(kwargs['vm_name'], num_sec=timeout)
        if power_on:
            self.start_vm(kwargs['vm_name'])
        return kwargs['vm_name']
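A minimal usage sketch for the method above. The provider object, template name, cluster name and sizing values are all assumptions for illustration, not part of the original snippet:

# Hypothetical call (names and values assumed): clone 'rhel7-template' into a
# new VM with 4 cores and 4096 MB of RAM, then power it on.
vm_name = provider.deploy_template(
    'rhel7-template',
    vm_name='demo-vm-01',
    cluster='Default',
    cpu=4,
    ram=4096,      # interpreted as MB by the method above
    power_on=True,
    timeout=1200,  # seconds to wait for the cloned VM to reach the stopped state
)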
Example #2
def hotplug_cpu(api):
    topology = params.CpuTopology(
        cores=1,
        threads=1,
        sockets=2,
    )
    vm = api.vms.get(VM0_NAME)
    vm.cpu.topology = topology
    nt.assert_true(
        vm.update()
    )
    nt.assert_true(api.vms.get(VM0_NAME).cpu.topology.sockets == 2)
    
    open('known_hosts', 'w').close()
    client = ssh.get_ssh_client(
        ip_addr='192.168.201.213',
        ssh_key='known_hosts', 
        username='******',
        password='******'
    )
    command = "lscpu | grep 'CPU(s):'"
    stdin, out, err = client.exec_command(command)
    cpu_number = out.read().splitlines()[0].split(":")[1].strip()
    client.close()
    os.remove('known_hosts')
    nt.assert_true(int(cpu_number) == 2)
Example #3
def _vm_args_to_params(**vm_args):  # noqa - ignore mccabe warning
    """
    Convert fabric-style simple arguments into an oVirt VM parameters structure

    All parameters are as defined in the 'create' task for customizing the pool
    VMs

    :returns: an oVirt VM parameters structure or None if no customization was
              requested
    :rtype: oVirtObjects.VM
    """
    vm_args_supported = (
        'custom_serial_number',
        'memory',
        'memory_guaranteed',
        'memory_balooning',
        'vcpus',
    )
    vm_args = dict((key, value) for key, value in vm_args.iteritems()
                   if key in vm_args_supported and value is not None)
    if not vm_args:
        return None
    vm_params = oVirtParams.VM()
    memory = None
    if 'memory' in vm_args:
        memory = int(vm_args['memory'])
        vm_params.memory = memory
    mem_policy = None
    if 'memory_guaranteed' in vm_args or 'memory_balooning' in vm_args:
        mem_policy = oVirtParams.MemoryPolicy()
        if 'memory_guaranteed' in vm_args:
            mem_policy.guaranteed = int(vm_args['memory_guaranteed'])
        if 'memory_balooning' in vm_args:
            mem_policy.ballooning = bool(vm_args['memory_balooning'])
    # oVirt sets guaranteed to 1G by default so we need to set it for smaller
    # VMs. This is a work-around for oVirt BZ#1333369
    if memory and memory < 1 * GiB:
        if mem_policy is None:
            mem_policy = oVirtParams.MemoryPolicy(guaranteed=memory)
        elif mem_policy.guaranteed is None:
            mem_policy.guaranteed = memory
    vm_params.memory_policy = mem_policy
    if 'vcpus' in vm_args:
        vm_params.cpu = oVirtParams.CPU(topology=oVirtParams.CpuTopology(
            sockets=int(vm_args['vcpus'])))
    if 'custom_serial_number' in vm_args:
        vm_params.serial_number = oVirtParams.SerialNumber(
            policy='custom',
            value=vm_args['custom_serial_number'],
        )
    return vm_params
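A short sketch of how the helper above might be called; the values are illustrative and GiB is the same constant the helper already relies on:

# Hypothetical call (values assumed): 512 MiB of RAM and 2 vCPU sockets.
vm_params = _vm_args_to_params(memory=512 * 2 ** 20, vcpus=2)
# Because 512 MiB is below 1 GiB, the helper also sets
# memory_policy.guaranteed to the same value (the BZ#1333369 work-around).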
Example #4
def make_vm_from_template(api, stream, cfme_data, cluster, temp_template_name,
        temp_vm_name, provider, mgmt_network=None):
    """Makes temporary VM from imported template. This template will be later deleted.
       It's used to add a new disk and to convert back to template.

    Args:
        api: API to chosen RHEVM provider.
        cluster: Cluster to save the temporary VM on.
        mgmt_network: management network on the RHEVM box; it's 'ovirtmgmt' by default on RHV 4.0
            and 'rhevm' on older RHEVM versions.
        temp_template_name: temporary template name created from ova
        temp_vm_name: temporary vm name to be created.
        provider: provider_key
    """
    cores = cfme_data['template_upload']['hardware'][stream]['cores']
    sockets = cfme_data['template_upload']['hardware'][stream]['sockets']
    cpu = params.CPU(topology=params.CpuTopology(cores=cores, sockets=sockets))
    vm_memory = cfme_data['template_upload']['hardware'][stream]['memory'] * 1024 * 1024 * 1024

    try:
        if api.vms.get(temp_vm_name) is not None:
            logger.info("RHEVM:%r Warning: found another VM with this name (%r).",
                        provider, temp_vm_name)
            logger.info("RHEVM:%r Skipping this step, attempting to continue...", provider)
            return

        actual_template = api.templates.get(temp_template_name)
        actual_cluster = api.clusters.get(cluster)
        params_vm = params.VM(name=temp_vm_name, template=actual_template, cluster=actual_cluster,
            memory=vm_memory, cpu=cpu)
        api.vms.add(params_vm)

        # we must wait for the vm to become available
        def check_status():
            return api.vms.get(temp_vm_name).get_status().state == 'down'

        wait_for(check_status, fail_condition=False, delay=5, num_sec=240)
        if mgmt_network:
            vm = api.vms.get(temp_vm_name)
            nic = vm.nics.get('eth0')
            nic.network = params.Network(
                name=mgmt_network)
            nic.interface = 'virtio'
            nic.update()
        # check, if the vm is really there
        if not api.vms.get(temp_vm_name):
            logger.error("RHEVM:%r temp VM could not be provisioned", provider)
            sys.exit(127)
        logger.info("RHEVM:%r successfully provisioned temp vm", provider)
    except Exception:
        logger.exception("RHEVM:%r Make_temp_vm_from_template failed:", provider)
Example #5
def add_vm_template(api):
    #TODO: Fix the exported domain generation.
    #For the time being, add VM from Glance imported template.
    if api.templates.get(name=TEMPLATE_CIRROS) is None:
        raise SkipTest('%s: template %s not available.' % (add_vm_template.__name__, TEMPLATE_CIRROS))

    vm_memory = 512 * MB
    vm_params = params.VM(
        name=VM1_NAME,
        description='CirrOS imported from Glance as Template',
        memory=vm_memory,
        cluster=params.Cluster(
            name=TEST_CLUSTER,
        ),
        template=params.Template(
            name=TEMPLATE_CIRROS,
        ),
        display=params.Display(
            type_='vnc',
        ),
        memory_policy=params.MemoryPolicy(
            guaranteed=vm_memory / 2,
            ballooning=False,
        ),
        os=params.OperatingSystem(
            type_='other_linux',
        ),
        timezone='Etc/GMT',
        type_='server',
        serial_number=params.SerialNumber(
            policy='custom',
            value='12345678',
        ),
        cpu=params.CPU(
            architecture='X86_64',
            topology=params.CpuTopology(
                cores=1,
                threads=2,
                sockets=1,
            ),
        ),
    )
    api.vms.add(vm_params)
    testlib.assert_true_within_long(
        lambda: api.vms.get(VM1_NAME).status.state == 'down',
    )
    disk_name = api.vms.get(VM1_NAME).disks.list()[0].name
    testlib.assert_true_within_long(
        lambda:
        api.vms.get(VM1_NAME).disks.get(disk_name).status.state == 'ok'
    )
Example #6
def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
    if vmdisk_alloc == 'thin':
        # define VM params
        vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
                             template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
                             cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype)
        # define disk params
        vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System",
                             format='cow',
                             storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
        # define network parameters
        network_net = params.Network(name=vmnetwork)
        nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
    elif vmdisk_alloc == 'preallocated':
        # define VM params
        vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
                             template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
                             cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores), sockets=vmcpus)), type_=vmtype)
        # define disk params
        vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System",
                             format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
        # define network parameters
        network_net = params.Network(name=vmnetwork)
        nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')

    try:
        conn.vms.add(vmparams)
    except Exception:
        raise Exception("Error creating VM with specified parameters")
    vm = conn.vms.get(name=vmname)
    try:
        vm.disks.add(vmdisk)
    except Exception:
        raise Exception("Error attaching disk")
    try:
        vm.nics.add(nic_net1)
    except Exception:
        raise Exception("Error adding nic")
Example #7
def updateCpuNumber( vmname, cpunum ):
    logDebug( "Updating CPU number for VM %s setting total CPU to: %s" %( vmname, cpunum ) )
    # check that the VM is down before updating its CPU topology
    try:
        vm = api.vms.get(name=vmname)
        if vm.get_status().state == 'down':
            c1 = vm.get_cpu()
            c1.set_topology(params.CpuTopology(cores=1,sockets=cpunum))
            vm.set_cpu(c1)
            vm.update()
    except Exception, err:
        logDebug( "Error on updating CPU for VM %s" %( vmname ), 2 )
        logDebug( Exception, 2)
        logDebug( err, 2)
        sys.exit(1)
Example #8
def prepare_rhevm_template():
    tmp = {
        'template_disks': params.Disks(clone=appliance['clone_template']),
        'cluster_object': api.clusters.get(name=appliance['cluster']),
        'host_object': api.hosts.get(appliance['host']),
        'migrate': appliance['migrate'],
        'appliance_nics': appliance['NICS'][:],
        'appliance_memory': appliance['memory_size'],
        'appliance_type': appliance['vm_type'],
        'num_cores': appliance['cores'],
        'num_cpus': appliance['cpus'],
        'storage_name': appliance['disk_location'],
        'disks': appliance['disks']
    }

    tmp['cpu_topology'] = params.CpuTopology(cores=tmp['num_cores'],
                                             threads=tmp['num_cpus'])
    tmp['cpu_object'] = params.CPU(topology=tmp['cpu_topology'])
    tmp['domain_object'] = api.storagedomains.get(name=tmp['storage_name'])
    tmp['actions'] = params.Action(storage_domain=tmp['domain_object'])
    tmp['placement_object'] = params.VmPlacementPolicy(host=tmp['host_object'],
                                                       affinity=tmp['migrate'])
    return tmp
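A hedged sketch of how the returned dictionary could feed a params.VM call. The VM name and the GB-to-bytes conversion are assumptions; the keyword arguments mirror those used elsewhere on this page:

# Hypothetical follow-up (VM name and memory unit assumed).
tmp = prepare_rhevm_template()
new_vm = params.VM(
    name='cfme-appliance',   # assumed name
    cluster=tmp['cluster_object'],
    memory=int(tmp['appliance_memory']) * 1024 * 1024 * 1024,  # assuming GB
    cpu=tmp['cpu_object'],
    placement_policy=tmp['placement_object'],
    template=api.templates.get(name='Blank'),
)
api.vms.add(new_vm)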
Example #9
def createGuest(api,guest_cluster,guest_name,guest_description,guest_mem,guest_cpu,guest_disks_gb,guest_domain,guest_networks):
    cpu_params = params.CPU(topology=params.CpuTopology(cores=guest_cpu))
    try:
        api.vms.add(params.VM(name=guest_name,memory=guest_mem*1024*1024,cluster=api.clusters.get(guest_cluster),template=api.templates.get('Blank'),cpu=cpu_params,type_="server",description=guest_description))

        for ethnum in range(len(guest_networks)):
            api.vms.get(guest_name).nics.add(params.NIC(name='eth'+str(ethnum), network=params.Network(name=guest_networks[ethnum]), interface='virtio'))

        # create boot disk; the first disk is always the boot disk
        createDisk(api, guest_name, guest_domain, guest_disks_gb[0], bootable=True, disk_name=guest_name+"_Disk1")
        #create remaining disks
        if len(guest_disks_gb) > 1:
            disk_num = 2
            for guest_disk_gb in guest_disks_gb[1:]:
                createDisk(api, guest_name, guest_domain, guest_disk_gb, disk_name=guest_name+"_Disk"+str(disk_num))
                disk_num += 1
        while api.vms.get(guest_name).status.state != 'down':
            sleep(1)

        result = "Succesfully created guest: " + guest_name
    except Exception as e:
        result = 'Failed to create VM with disk and NIC: %s' % str(e)

    return result
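An illustrative call for the function above; every value is assumed, and createDisk is the same helper the function already depends on:

# Hypothetical call (values assumed): a 2-vCPU guest with 4096 MB of RAM, a
# 20 GB boot disk, a 100 GB data disk, and two virtio NICs.
print(createGuest(api, 'Default', 'guest01', 'demo guest', 4096, 2,
                  [20, 100], 'data_domain', ['ovirtmgmt', 'storage']))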
Example #10
    def create(self,
               name,
               clu,
               numcpu,
               numinterfaces,
               netinterface,
               diskthin1,
               disksize1,
               diskinterface,
               memory,
               storagedomain,
               guestid,
               net1,
               net2=None,
               net3=None,
               net4=None,
               mac1=None,
               mac2=None,
               launched=True,
               iso=None,
               diskthin2=None,
               disksize2=None,
               vnc=False):
        boot1, boot2 = 'hd', 'network'
        if iso in ["", "xx", "yy"]:
            iso = None
        if iso:
            boot2 = 'cdrom'
        api = self.api
        memory = memory * MB
        disksize1 = disksize1 * GB
        if disksize2:
            disksize2 = disksize2 * GB
        #VM CREATION IN OVIRT
        #TODO check that clu and storagedomain exist and that there is space there
        diskformat1, diskformat2 = 'raw', 'raw'
        sparse1, sparse2 = False, False
        if diskthin1:
            diskformat1 = 'cow'
            sparse1 = True
        if disksize2 and diskthin2:
            diskformat2 = 'cow'
            sparse2 = True
        vm = api.vms.get(name=name)
        if vm:
            return "VM %s allready existing.Leaving...\n" % name
        clu = api.clusters.get(name=clu)
        storagedomain = api.storagedomains.get(name=storagedomain)
        try:
            disk1 = params.Disk(storage_domains=params.StorageDomains(
                storage_domain=[storagedomain]),
                                name="%s_Disk1" % (name),
                                size=disksize1,
                                type_='system',
                                status=None,
                                interface=diskinterface,
                                format=diskformat1,
                                sparse=sparse1,
                                bootable=True)
            disk1 = api.disks.add(disk1)
            disk1id = disk1.get_id()
        except:
            return "Insufficient space in storage domain for disk1.Leaving...\n"
        if disksize2:
            try:
                disk2 = params.Disk(storage_domains=params.StorageDomains(
                    storage_domain=[storagedomain]),
                                    name="%s_Disk2" % (name),
                                    size=disksize2,
                                    type_='system',
                                    status=None,
                                    interface=diskinterface,
                                    format=diskformat2,
                                    sparse=sparse2,
                                    bootable=False)
                disk2 = api.disks.add(disk2)
                disk2id = disk2.get_id()
            except:
                return "Insufficient space in storage domain for disk2.Leaving...\n"

        #boot order
        boot = [params.Boot(dev=boot1), params.Boot(dev=boot2)]
        #vm creation
        kernel, initrd, cmdline = None, None, None
        if vnc:
            display = params.Display(type_='vnc')
        else:
            display = params.Display(type_='spice')
        api.vms.add(
            params.VM(
                name=name,
                memory=memory,
                cluster=clu,
                display=display,
                template=api.templates.get('Blank'),
                os=params.OperatingSystem(type_=guestid,
                                          boot=boot,
                                          kernel=kernel,
                                          initrd=initrd,
                                          cmdline=cmdline),
                cpu=params.CPU(topology=params.CpuTopology(cores=numcpu)),
                type_="server"))
        #add nics
        api.vms.get(name).nics.add(
            params.NIC(name='eth0',
                       network=params.Network(name=net1),
                       interface=netinterface))

        if numinterfaces >= 2:
            api.vms.get(name).nics.add(
                params.NIC(name='eth1',
                           network=params.Network(name=net2),
                           interface=netinterface))
            #compare eth0 and eth1 to make sure eth0 has the lower mac
            eth0ok = True
            maceth0 = api.vms.get(name).nics.get(name="eth0").mac.address
            maceth1 = api.vms.get(name).nics.get(name="eth1").mac.address
            eth0 = maceth0.split(":")
            eth1 = maceth1.split(":")
            for i in range(len(eth0)):
                el0 = int(eth0[i], 16)
                el1 = int(eth1[i], 16)
                if el0 == el1:
                    continue
                # stop at the first differing octet
                eth0ok = el0 < el1
                break

            if not eth0ok:
                tempnic = "00:11:11:11:11:11"
                nic = api.vms.get(name).nics.get(name="eth0")
                nic.mac.address = tempnic
                nic.update()
                nic = api.vms.get(name).nics.get(name="eth1")
                nic.mac.address = maceth0
                nic.update()
                nic = api.vms.get(name).nics.get(name="eth0")
                nic.mac.address = maceth1
                nic.update()

        if mac1:
            nic = api.vms.get(name).nics.get(name="eth0")
            if not ":" in mac1:
                mac1 = "%s%s" % (nic.mac.address[:-2], mac1)
            nic.mac.address = mac1
            nic.update()

        if mac2:
            nic = api.vms.get(name).nics.get(name="eth1")
            if not ":" in mac2:
                mac2 = "%s%s" % (nic.mac.address[:-2], mac2)
            nic.mac.address = mac2
            nic.update()

        if numinterfaces >= 3:
            api.vms.get(name).nics.add(
                params.NIC(name='eth2',
                           network=params.Network(name=net3),
                           interface=netinterface))
        if numinterfaces >= 4:
            api.vms.get(name).nics.add(
                params.NIC(name='eth3',
                           network=params.Network(name=net4),
                           interface=netinterface))
        api.vms.get(name).update()
        if iso:
            iso = checkiso(api, iso)
            cdrom = params.CdRom(file=iso)
            api.vms.get(name).cdroms.add(cdrom)
        while api.disks.get(id=disk1id).get_status().get_state() != "ok":
            time.sleep(5)
        api.vms.get(name).disks.add(disk1)
        while not api.vms.get(name).disks.get(id=disk1id):
            time.sleep(2)
        api.vms.get(name).disks.get(id=disk1id).activate()
        if disksize2:
            while api.disks.get(id=disk2id).get_status().get_state() != "ok":
                time.sleep(5)
            api.vms.get(name).disks.add(disk2)
            while not api.vms.get(name).disks.get(id=disk2id):
                time.sleep(2)
            api.vms.get(name).disks.get(id=disk2id).activate()
        #retrieve MACS for cobbler
        vm = api.vms.get(name=name)
        for nic in vm.nics.list():
            self.macaddr.append(nic.mac.address)
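A sketch of how the method above might be invoked on whatever object exposes it (here called ovirt, an assumption); the argument values are purely illustrative:

# Hypothetical call (object name and values assumed): 2 vCPUs, 2048 MB of RAM,
# one thin-provisioned 20 GB virtio disk, and a single NIC on 'ovirtmgmt'.
msg = ovirt.create(name='test-vm', clu='Default', numcpu=2, numinterfaces=1,
                   netinterface='virtio', diskthin1=True, disksize1=20,
                   diskinterface='virtio', memory=2048,
                   storagedomain='data_domain', guestid='rhel_7x64',
                   net1='ovirtmgmt')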
Example #11
    def spawn(self, context, instance, image_meta, network_info, block_device_info=None):
        """Creates a VM instance in oVirt."""
        try:
            
            try:
                for i in network_info:
                    port_id = i['ovs_interfaceid']
                    mac = i['address']
            except Exception as e:
                LOG.debug(_("network_info error %s" %str(e)))
            
            MB = 1024 * 1024
            GB = 1024 * MB
            
            #name = instance['name']
            name = instance['display_name']
            cluster = self._session.clusters.get(instance['node'])
            
            
            memory = instance['memory_mb'] * MB 
            
            template = self._session.templates.get('Blank')
            
            tdesc =  image_meta['name'] + " ("+str(image_meta['id'])[0:7]+")"
            for t in self._session.templates.list():
                if tdesc == t.get_description():
                    template = t
             
            vmType = 'server' 
            
            instance_vcpus = instance['vcpus']
            template_cpus = template.cpu.topology.cores
            vm_cpu_cores = (instance_vcpus - template_cpus) + 1
            LOG.info(_("*******rhevm -vmops ---- spawn--vm_cpu_cores-->>%s" %vm_cpu_cores))
            
            cpuTopology = params.CpuTopology(cores=vm_cpu_cores, sockets=1) 
            cpu = params.CPU(topology=cpuTopology) 
            
            ovirtVMParam = params.VM(name=name, 
                                 type_=vmType, 
                                 memory=memory, 
                                 cluster=cluster, 
                                 cpu=cpu, 
                                 template=template) 
             
            newVm = self._session.vms.add(ovirtVMParam)
            
            #stackutils.delete_port(port_id)
                                   
            nicName = 'nic-1' 
            macparam = params.MAC(address=mac) 
            network = self._session.networks.get(name='ovirtmgmt') # ovirtmgmt, Net1
            nicInterface = 'virtio' 
            nic = params.NIC(name=nicName, 
                             interface=nicInterface, 
                             #mac=macparam, 
                             network=network) 
            
            newNic = newVm.nics.add(nic) 
            
            '''
            instance_root_gb = instance['root_gb']
            dl = template.disks.list()
            template_disksize = 0
            for d in dl:
                template_disksize += d.get_size()
                
            template_diskGB = template_disksize / GB
            pending_diskGB = (instance_root_gb - template_diskGB)
            
            if pending_diskGB > 0:
                domain = self._engine.storagedomains.get('DataNFS')
                storageDomain = params.StorageDomains(storage_domain=[domain])
                #volume_size = volume['size']
                size = pending_diskGB * pow(2, 30) 
                diskType = 'data' 
                diskFormat = 'cow' 
                diskInterface = 'virtio' 
                sparse = True 
                bootable = False
                vol_name = 'RootDisk'
                
                newVm.disks.add(params.Disk(
                               name=vol_name,
                               storage_domains=storageDomain,
                               size=size, 
                               type_=diskType,
                               interface=diskInterface, 
                               format=diskFormat, 
                               #sparse=FLAGS.ovirt_engine_sparse,
                               sparse=sparse,
                               bootable=bootable))
                
            '''
            while self._session.vms.get(name).status.state != 'down':
                time.sleep(3)
            try:
                newVm.start()
            except Exception as e:
                #print " ERROR....VM is not able to start : ", str(e)
                newVm.delete()
                raise  # re-raise the original start failure after cleanup

            while self._session.vms.get(name).status.state != 'up':
                time.sleep(3)

        except Exception as e:
            raise
Example #12
def create_vm(vmprefix, disksize, storagedomain, network, vmcores, vmsockets, addstorage):
    print ("------------------------------------------------------")
    print ("Creating", num, "RHEV based virtual machines")
    print ("-------------------------------------------------------")
    for machine in range(0,int(num)):
        try:
            vm_name = str(vmprefix) + "_" + str(machine) + "_sockets_" + str(vmsockets)
            vm_memory = int(memory)*1024*1024*1024
            vm_cluster = api.clusters.get(name=cluster)
            vm_template = api.templates.get(name=vmtemplate)
            vm_os = params.OperatingSystem(boot=[params.Boot(dev="hd")])
            cpu_params = params.CPU(topology=params.CpuTopology(sockets=vmsockets,cores=vmcores))
            # set proper VM parameters - based on will VM be on "thin" disk or "preallocated" disk
            if vmdiskpreallocated == "yes":
                vm_params = params.VM(name=vm_name,memory=vm_memory,cluster=vm_cluster,template=vm_template,os=vm_os,cpu=cpu_params, disks=params.Disks(clone=True))
            elif vmdiskpreallocated == "no":
                vm_params = params.VM(name=vm_name,memory=vm_memory,cluster=vm_cluster, template=vm_template, os=vm_os,cpu=cpu_params)

            print ("creating virtual machine", vm_name)
            api.vms.add(vm=vm_params)
            api.vms.get(vm_name).nics.add(params.NIC(name=nicname, network=params.Network(name=network), interface='virtio'))
            # update vm and add disk to it
            wait_vm_state(vm_name,"down")
            print ("Virtual machine created: ", vm_name, "and it has parameters"," memory:", memory,"[GB]",
                   " cores:", vmcores,
                   " sockets", vmsockets,
                   " waiting on machine to unlock so we proceed with configuration")
            wait_vm_state(vm_name, "down")
            diskname = "disk_" + str(vmprefix) + str(machine)

            # if there is necessary to add additional disk to VM - can be preallocated or thin

            if addstorage == "yes" and diskpreallocated == "no":

                for disk in range(0,int(numdisks)):
                    # add one disk at time - one will be added by default - only add thin disks
                    api.vms.get(vm_name).disks.add(params.Disk(name=diskname + "_" + str(disk), storage_domains=params.StorageDomains(storage_domain=[api.storagedomains.get(name=storagedomain)]),
                                                           size=int(disksize)*1024*1024*1024,
                                                           status=None,
                                                           interface='virtio',
                                                           format='cow',
                                                           sparse=True,
                                                           bootable=False))
                    print ("Disk of size:",disksize,"GB originating from", storagedomain, "storage domain is attached to VM - but we cannot start machine before disk is in OK state"
                                                                                      " starting machine with disk attached to VM and same time having disk in Locked state will result in machine start failure")
                    wait_disk_state(diskname + "_" + str(disk) ,"ok")

                print ("Machine", vm_name, "is ready to be started")
                api.vms.get(vm_name).start()
                print ("Machine", vm_name, "started successfully, machine parameters are memory:",memory,"[GB]",
                       "cores:", vmcores,
                       " sockets", vmsockets,
                       " storage disk", disksize, "[GB]")


            elif addstorage == "yes" and diskpreallocated == "yes":

                for disk in range(0, int(numdisks)):
                    api.vms.get(vm_name).disks.add(params.Disk(name=diskname + "_" + str(disk) , storage_domains=params.StorageDomains(storage_domain=[api.storagedomains.get(name=storagedomain)]),
                                                           size=int(disksize)*1024*1024*1024,
                                                           status=None,
                                                           interface='virtio',
                                                           format='raw',
                                                           sparse=False,
                                                           bootable=False
                                                           ))
                    # if disk is not in "OK" state ... wait here - we cannot start machine if this is not the case
                    print ("Disk of size:",disksize,"GB originating from", storagedomain, "storage domain is attached to VM - but we cannot start machine before disk is in OK state"
                   " starting machine with disk attached to VM and same time having disk in Locked state will result in machine start failure")
                    wait_disk_state(diskname + "_" + str(disk) ,"ok")

                print ("Machine", vm_name, "is ready to be started")
                api.vms.get(vm_name).start()
                print ("Machine", vm_name, "started successfully, machine parameters are memory:",memory,"[GB]"
                   " cores:", vmcores,
                   " sockets", vmsockets,
                   " storage disk", disksize, "[GB]"
                       )

            elif addstorage == "no":
                print ("addstorage=no was specified for", vm_name,"no additional disk will be added, starting VM:", vm_name)
                api.vms.get(vm_name).start()

            print ("Machine", vm_name, "started successfully, machine parameters are memory:",memory,"[GB]"
                       "cores:", vmcores,
                       "sockets:", vmsockets,
                       "storage_disk", disksize, "[GB]"
                       )
        except Exception as e:
            print ("Adding virtual machine '%s' failed: %s", vm_name, e)
Example #13
from ovirtsdk.xml import params
from rhev_functions import *

baseurl = "https://%s:%s" % (options.server, options.port)

api = API(url=baseurl, username=options.username, password=options.password, insecure=True)

try:
    value = api.hosts.list()
except:
    print "Error accessing RHEV-M api, please check data and connection and retry"
    sys.exit(1)

# Define VM based on parameters
if __name__ == "__main__":
    vmparams = params.VM(os=params.OperatingSystem(type_=options.osver), cpu=params.CPU(topology=params.CpuTopology(cores=int(options.vmcpu))), name=options.name, memory=1024 * 1024 * 1024 * int(options.vmmem), cluster=api.clusters.get(name=options.cluster), template=api.templates.get(name="Blank"), type_="server")
    vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(options.sdsize), wipe_after_delete=True, sparse=True, interface="virtio", type_="System", format="cow", storage_domains=params.StorageDomains(storage_domain=[api.storagedomains.get(name="data_domain")]))
    vmnet = params.NIC()

    network_gest = params.Network(name=options.vmgest)
    network_serv = params.Network(name=options.vmserv)

    nic_gest = params.NIC(name='eth0', network=network_gest, interface='virtio')
    nic_serv = params.NIC(name='eth1', network=network_serv, interface='virtio')

    try:
        api.vms.add(vmparams)
    except:
        print "Error creating VM with specified parameters, recheck"
        sys.exit(1)
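The snippet above defines vmdisk and the two NICs but only ever adds the VM itself; a hedged continuation in the style of Example #6 would attach them afterwards:

# Hypothetical continuation (mirrors Example #6): attach the disk and NICs
# that were defined above but never added to the new VM.
vm = api.vms.get(name=options.name)
vm.disks.add(vmdisk)
vm.nics.add(nic_gest)
vm.nics.add(nic_serv)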
Example #14
def create(name,
           cluster_query=None,
           template_query='name=Blank',
           memory=2 * GiB,
           vcpus=2,
           disk_query=None,
           ostype='rhel_7x64',
           networks=None,
           show=None,
           headers='yes',
           ovirt=None):
    """
    Create a new oVirt VM

    :param str name:           The name of the VM to create
    :param str cluster_query:  A query to find the cluster to place the VM in;
                               if more than one cluster is found, the first one
                               is used
    :param str template_query: A query to find the template to use to create
                               the VM; if more than one template is found, the
                               first one is used
    :param int memory:         The VM memory size (in bytes)
    :param int vcpus:          The amount of vCPUs to assign to the VM
    :param str disk_query:     A query for disks to attach to the VM
    :param str ostype:         The OS type of the VM
    :param str networks:       A pipe (|) separated list of networks to attach
                               to the VM in the order they should be added; a
                               network can appear more than once. Only networks
                               that are attached to the VM's cluster will be
                               added
    :param ovirtsdk.api.API ovirt: An open oVirt API connection

    The 'show' and 'headers' parameters are the same as for the 'query' task

    :returns: The VM that was created
    :rtype: ovirtsdk.infrastructure.brokers.VM
    """
    if cluster_query is None:
        # get the top 2 clusters so we'll issue a warning if there is more than
        # one and the user didn't specify an explicit selection query
        clusters = ovirt.clusters.list(max=2)
    else:
        clusters = ovirt.clusters.list(query=cluster_query)
    if not clusters:
        abort("No cluster found by given query")
    if len(clusters) > 1:
        warn("More then one cluster found, will use the first")
    cluster = clusters[0]
    templates = ovirt.templates.list(query=template_query)
    if not templates:
        abort("No template found by given query")
    if len(templates) > 1:
        warn("More then one tempalte found, will use the first")
    template = templates[0]
    vm = ovirt.vms.add(
        oVirtParams.VM(
            name=name,
            template=template,
            cluster=cluster,
            memory=int(memory),
            cpu=oVirtParams.CPU(topology=oVirtParams.CpuTopology(
                sockets=int(vcpus))),
            os=oVirtParams.OperatingSystem(type_=ostype),
        ))
    if disk_query is not None:
        disks = ovirt.disks.list(query=disk_query)
        for disk in disks:
            vm.disks.add(disk)
    if networks is not None:
        nic_name = ('nic{0}'.format(i) for i in count())
        for network_name in networks.split('|'):
            network = cluster.networks.get(name=network_name)
            if network is None:
                continue
            vm.nics.add(nic=oVirtParams.NIC(
                name=next(nic_name),
                network=network,
                linked=True,
            ))
    oVirtObjectType.all_types['vm'].print_table((vm, ),
                                                show=show,
                                                headers=headers)
    return vm
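A minimal sketch of calling the task above, assuming an open ovirtsdk API connection and the same GiB constant used in the defaults; the queries and network names are illustrative:

# Hypothetical call (connection, queries and network names assumed).
vm = create('pool-vm-01',
            cluster_query='name=production',
            template_query='name=rhel7-base',
            memory=4 * GiB,
            vcpus=4,
            networks='ovirtmgmt|storage',
            ovirt=api_connection)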
Example #15
api = apilogin(url=baseurl,
               username=options.username,
               password=options.password)

try:
    value = api.hosts.list()
except:
    print "Error accessing RHEV-M api, please check data and connection and retry"
    sys.exit(1)

# Define VM based on parameters
if __name__ == "__main__":
    vmparams = params.VM(
        os=params.OperatingSystem(type_=options.osver),
        cpu=params.CPU(topology=params.CpuTopology(cores=int(options.vmcpu))),
        name=options.name,
        memory=1024 * 1024 * 1024 * int(options.vmmem),
        cluster=api.clusters.get(name=options.cluster),
        template=api.templates.get(name="Blank"),
        type_="server")
    vmdisk = params.Disk(
        size=1024 * 1024 * 1024 * int(options.sdsize),
        wipe_after_delete=True,
        sparse=True,
        interface="virtio",
        type_="System",
        format="cow",
        storage_domains=params.StorageDomains(
            storage_domain=[api.storagedomains.get(name="data_domain")]))
    vmnet = params.NIC()