Example 1
def template_to_instance(template, hostname, owner):
    """
    Instantiate a VM template with a given hostname and owner.
    """

    cluster = template.cluster
    beparams = {
        "vcpus": template.vcpus,
    }
    memory = template.memory
    if has_balloonmem(cluster):
        minram = template.minmem
        beparams['minmem'] = minram
        beparams['maxmem'] = memory
    else:
        beparams['memory'] = memory

    vcpus = template.vcpus
    disk_size = template.disks[0]["size"]

    kwargs = {
        "os": template.os,
        "ip_check": template.ip_check,
        "name_check": template.name_check,
        "pnode": template.pnode,
        "beparams": beparams,
    }

    job_id = cluster.rapi.CreateInstance('create', hostname,
                                         template.disk_template,
                                         template.disks, template.nics,
                                         **kwargs)
    vm = VirtualMachine()

    vm.cluster = cluster
    vm.hostname = hostname
    vm.ram = memory
    if has_balloonmem(cluster):
        vm.minram = minram
    vm.virtual_cpus = vcpus
    vm.disk_size = disk_size

    vm.owner = owner
    vm.ignore_cache = True

    # Do a dance to get the VM and the job referencing each other.
    vm.save()
    job = Job.objects.create(job_id=job_id, obj=vm, cluster=cluster)
    job.save()
    vm.last_job = job
    vm.save()

    # Grant admin permissions to the owner.
    owner.permissable.grant('admin', vm)

    return vm
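
The two memory layouts above depend on whether the cluster supports memory ballooning: newer clusters take a minmem/maxmem pair, older ones a single memory value. Below is a minimal standalone sketch of that branching, with has_balloonmem replaced by a plain boolean flag purely for illustration:

def build_beparams(vcpus, memory, minmem, balloon_supported):
    """Build the Ganeti backend-parameter dict used in the example above."""
    beparams = {"vcpus": vcpus}
    if balloon_supported:
        # Ballooning clusters take a memory range.
        beparams["minmem"] = minmem
        beparams["maxmem"] = memory
    else:
        # Older clusters take a single fixed amount.
        beparams["memory"] = memory
    return beparams

print(build_beparams(2, 2048, 512, True))
# {'vcpus': 2, 'minmem': 512, 'maxmem': 2048}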
Example 2
def template_to_instance(template, hostname, owner):
    """
    Instantiate a VM template with a given hostname and owner.
    """

    cluster = template.cluster
    beparams = {"vcpus": template.vcpus}
    memory = template.memory
    if has_balloonmem(cluster):
        minram = template.minmem
        beparams["minmem"] = minram
        beparams["maxmem"] = memory
    else:
        beparams["memory"] = memory

    vcpus = template.vcpus
    disk_size = template.disks[0]["size"]

    kwargs = {
        "os": template.os,
        "ip_check": template.ip_check,
        "name_check": template.name_check,
        "pnode": template.pnode,
        "beparams": beparams,
    }

    if template.snode:
        kwargs.update({"snode": template.snode})
    # The secondary node isn't set; drbd requires one, so this is a
    # programming error (form validation should have caught it).
    elif template.disk_template == "drbd":
        msg = "Disk template set to drbd, but no secondary node set"
        raise RuntimeError(msg)

    job_id = cluster.rapi.CreateInstance(
        "create", hostname, template.disk_template, template.disks, template.nics, **kwargs
    )
    vm = VirtualMachine()

    vm.cluster = cluster
    vm.hostname = hostname
    vm.ram = memory
    if has_balloonmem(cluster):
        vm.minram = minram
    vm.virtual_cpus = vcpus
    vm.disk_size = disk_size

    vm.owner = owner
    vm.ignore_cache = True

    # Do a dance to get the VM and the job referencing each other.
    vm.save()
    job = Job.objects.create(job_id=job_id, obj=vm, cluster=cluster)
    job.save()
    vm.last_job = job
    vm.save()

    # Grant admin permissions to the owner.
    owner.permissable.grant("admin", vm)

    return vm
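
Example 2 adds a guard for drbd instances: a secondary node must accompany the drbd disk template. Here is a small self-contained sketch of that invariant, separated from the Django models (the helper name is illustrative):

def secondary_node_kwargs(disk_template, snode):
    """Return the extra kwargs for the secondary node, enforcing the drbd rule."""
    if snode:
        return {"snode": snode}
    if disk_template == "drbd":
        # drbd replicates disks to a secondary node, so one is mandatory.
        raise RuntimeError("Disk template set to drbd, but no secondary node set")
    return {}

print(secondary_node_kwargs("plain", None))                 # {}
print(secondary_node_kwargs("drbd", "node2.example.org"))   # {'snode': 'node2.example.org'}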
Example 3
def template_to_instance(template, hostname, owner):
    """
    Instantiate a VM template with a given hostname and owner.
    """

    cluster = template.cluster
    beparams = {
        "vcpus": template.vcpus,
    }

    hvparams = {}
    info = cluster.info
    hv = info['default_hypervisor']
    kvm = hv == 'kvm'
    pvm = hv == 'xen-pvm'
    hvm = hv == 'xen-hvm'
    kvm_or_hvm = kvm or hvm
    kvm_or_pvm = kvm or pvm

    if kvm_or_hvm:
        hvparams.update(boot_order=template.boot_order)
        hvparams.update(cdrom_image_path=template.cdrom_image_path)
        hvparams.update(nic_type=template.nic_type)
        hvparams.update(disk_type=template.disk_type)
    if kvm_or_pvm:
        hvparams.update(kernel_path=template.kernel_path)
        hvparams.update(root_path=template.root_path)
    if kvm:
        hvparams.update(cdrom2_image_path=template.cdrom2_image_path)
        hvparams.update(serial_console=template.serial_console)

    memory = template.memory
    if has_balloonmem(cluster):
        minram = template.minmem
        beparams['minmem'] = minram
        beparams['maxmem'] = memory
    else:
        beparams['memory'] = memory

    vcpus = template.vcpus
    disk_size = template.disks[0]["size"]

    kwargs = {
        "os": template.os,
        "hypervisor": hv,
        "ip_check": template.ip_check,
        "name_check": template.name_check,
        "beparams": beparams,
        "no_install": template.no_install,
        "start": template.start,
        "hvparams": hvparams,
    }

    # Using auto allocator
    if template.iallocator:
        default_iallocator = cluster.info['default_iallocator']
        kwargs.update(iallocator=default_iallocator)
    # Not using allocator, pass pnode
    else:
        kwargs.update(pnode=template.pnode)
        # Also pass in snode if it exists (drbd needs a secondary node)
        if template.snode:
            kwargs.update(snode=template.snode)
        # The secondary node isn't set but the disk template is drbd, so this
        # is a programming error (form validation should have caught it).
        elif template.disk_template == 'drbd':
            msg = 'Disk template set to drbd, but no secondary node set'
            raise RuntimeError(msg)

    job_id = cluster.rapi.CreateInstance('create', hostname,
                                         template.disk_template,
                                         template.disks, template.nics,
                                         **kwargs)
    vm = VirtualMachine()

    vm.cluster = cluster
    vm.hostname = hostname
    vm.ram = memory
    if has_balloonmem(cluster):
        vm.minram = minram
    vm.virtual_cpus = vcpus
    vm.disk_size = disk_size

    vm.owner = owner
    vm.ignore_cache = True

    # Do a dance to get the VM and the job referencing each other.
    vm.save()
    job = Job.objects.create(job_id=job_id, obj=vm, cluster=cluster)
    job.save()
    vm.last_job = job
    vm.save()

    # Grant admin permissions to the owner.
    owner.permissable.grant('admin', vm)

    return vm
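
Example 3 also selects hypervisor parameters based on the cluster's default hypervisor: KVM and Xen HVM share the boot/CD-ROM/NIC/disk settings, KVM and Xen PVM share the kernel settings, and two options are KVM-only. The following is a standalone sketch of that selection; field values are passed as keyword arguments instead of being read from a template object, and the helper itself is hypothetical:

def build_hvparams(hv, **fields):
    """Pick the hvparams relevant to the given default hypervisor."""
    hvparams = {}
    if hv in ("kvm", "xen-hvm"):
        for key in ("boot_order", "cdrom_image_path", "nic_type", "disk_type"):
            hvparams[key] = fields.get(key)
    if hv in ("kvm", "xen-pvm"):
        for key in ("kernel_path", "root_path"):
            hvparams[key] = fields.get(key)
    if hv == "kvm":
        for key in ("cdrom2_image_path", "serial_console"):
            hvparams[key] = fields.get(key)
    return hvparams

print(sorted(build_hvparams("xen-pvm", kernel_path="/boot/vmlinuz", root_path="/dev/xvda1")))
# ['kernel_path', 'root_path']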