def test_view_load_recover(self):
    """
    Tests loading a VM that failed to deploy back into the create view
    for editing.
    """
    url = '/cluster/%s/%s/recover/'
    args = (self.cluster.slug, self.vm.hostname)

    # A VM without a stored template cannot be recovered; the view
    # should redirect back to the detail page instead.
    self.assert_200(url, args, [self.superuser],
                    template='ganeti/virtual_machine/detail.html',
                    follow=True)

    # Attach an (empty) template so the recover view can load the VM.
    recovery_template = VirtualMachineTemplate()
    recovery_template.save()
    self.vm.template = recovery_template
    self.vm.save()

    self.assert_standard_fails(url, args)

    allowed = [
        self.superuser,
        self.vm_admin,
        self.vm_modify,
        self.cluster_admin,
    ]
    self.assert_200(url, args, allowed,
                    template='ganeti/virtual_machine/create.html')
def setUp(self):
    """
    Build the fixtures shared by these tests: a cluster, a saved
    VirtualMachineTemplate, and the users allowed to act on it.
    """
    models.client.GanetiRapiClient = RapiProxy

    # Cluster
    cluster = Cluster(hostname='test.cluster.gwm', slug='test',
                      username='******', password='******')
    #cluster.info = INFO
    cluster.save()

    # Template. ``template_data`` holds the raw form values (cluster id,
    # disk/nic counts); the model itself wants the cluster instance and
    # no count fields, so a trimmed copy is used for construction.
    template_data = dict(
        template_name='new.vm.template',
        description='A new template.',
        cluster=cluster.id,
        start=True,
        name_check=True,
        disk_template='plain',
        disk_count=0,
        memory=256,
        vcpus=2,
        root_path='/',
        kernel_path='',
        cdrom_image_path='',
        serial_console=False,
        nic_type='paravirtual',
        disk_type='paravirtual',
        nic_count=0,
        boot_order='disk',
        os='image+ubuntu-lucid',
    )
    model_kwargs = template_data.copy()
    model_kwargs['cluster'] = cluster
    del model_kwargs['disk_count']
    del model_kwargs['nic_count']
    template = VirtualMachineTemplate(**model_kwargs)
    template.save()

    # Template Fields
    fields = vars(template).keys()

    # Users
    self.create_users([
        ('superuser', {'is_superuser': True}),
        'cluster_admin',
    ])
    self.cluster_admin.grant('admin', cluster)

    self.users = [self.superuser, self.cluster_admin]
    self.template = template
    self.cluster = cluster
    self.template_data = template_data
    self.template_fields = fields
def setUp(self):
    """
    Create the cluster, a persisted template, and the test users used
    by this test case.
    """
    models.client.GanetiRapiClient = RapiProxy

    # Cluster
    cluster = Cluster(hostname='test.cluster.gwm', slug='test',
                      username='******', password='******')
    #cluster.info = INFO
    cluster.save()

    # Raw form values for the template.  The counts and the cluster id
    # are form-only fields, so they are stripped/replaced before the
    # model object is constructed below.
    template_data = {
        'template_name': 'new.vm.template',
        'description': 'A new template.',
        'cluster': cluster.id,
        'start': True,
        'name_check': True,
        'disk_template': 'plain',
        'disk_count': 0,
        'memory': 256,
        'vcpus': 2,
        'root_path': '/',
        'kernel_path': '',
        'cdrom_image_path': '',
        'serial_console': False,
        'nic_type': 'paravirtual',
        'disk_type': 'paravirtual',
        'nic_count': 0,
        'boot_order': 'disk',
        'os': 'image+ubuntu-lucid',
    }
    init_kwargs = template_data.copy()
    init_kwargs['cluster'] = cluster
    del init_kwargs['disk_count']
    del init_kwargs['nic_count']
    template = VirtualMachineTemplate(**init_kwargs)
    template.save()

    # Template Fields
    fields = vars(template).keys()

    # Users
    self.create_users([
        ('superuser', {'is_superuser': True}),
        'cluster_admin',
    ])
    self.cluster_admin.grant('admin', cluster)

    self.users = [self.superuser, self.cluster_admin]
    self.template = template
    self.cluster = cluster
    self.template_data = template_data
    self.template_fields = fields
def setUp(self):
    """
    Create a cluster (with synced nodes), a template, a VM instance,
    and the users exercised by these tests.
    """
    self.tearDown()
    models.client.GanetiRapiClient = RapiProxy

    cluster = Cluster(hostname='test.cluster', slug='test',
                      username='******', password='******')
    # XXX MySQL DB does not reset auto-increment IDs when an object is
    # removed
    cluster.id = 23
    cluster.save()
    cluster.sync_nodes()

    template = VirtualMachineTemplate(template_name="Template1",
                                      cluster=cluster)
    template.disks = [{'size': 500}]
    template.nics = [{'mode': 'bridged', 'link': ''}]
    template.save()

    instance = VirtualMachine(hostname='new.vm.hostname', cluster=cluster)
    instance.info = INSTANCE
    instance.disks = []
    instance.nics = []
    instance.save()

    # Users
    self.create_users([
        ('superuser', {'is_superuser': True}),
        'cluster_admin',
        'create_vm',
        'unauthorized',
    ])
    self.cluster_admin.grant('admin', cluster)
    self.create_vm.grant('create_vm', cluster)

    # Minimal valid POST payload for the template-create view.
    self.create_template_data = dict(
        cluster=cluster.pk,
        template_name='foo_bar',
        memory=512,
        disk_template='plain',
        disk_count=0,
        nic_count=0,
    )

    self.cluster = cluster
    self.template = template
    self.instance = instance
    self.c = Client()
def setUp(self):
    """
    Prepare the shared fixtures: one cluster with synced nodes, one
    saved template, one VM, the test users, and a test client.
    """
    self.tearDown()
    models.client.GanetiRapiClient = RapiProxy

    cluster = Cluster(hostname='test.cluster', slug='test',
                      username='******', password='******')
    # XXX MySQL DB does not reset auto-increment
    # IDs when an object is removed
    cluster.id = 23
    cluster.save()
    cluster.sync_nodes()

    template = VirtualMachineTemplate(template_name="Template1",
                                      cluster=cluster)
    template.disks = [{'size': 500}]
    template.nics = [{'mode': 'bridged', 'link': ''}]
    template.save()

    instance = VirtualMachine(hostname='new.vm.hostname', cluster=cluster)
    instance.info = INSTANCE
    instance.disks = []
    instance.nics = []
    instance.save()

    # Users
    self.create_users([
        ('superuser', {'is_superuser': True}),
        'cluster_admin',
        'create_vm',
        'unauthorized',
    ])
    self.cluster_admin.grant('admin', cluster)
    self.create_vm.grant('create_vm', cluster)

    # Smallest form payload that the template-create view accepts.
    self.create_template_data = {
        'cluster': cluster.pk,
        'template_name': 'foo_bar',
        'memory': 512,
        'disk_template': 'plain',
        'disk_count': 0,
        'nic_count': 0,
    }

    self.cluster = cluster
    self.template = template
    self.instance = instance
    self.c = Client()
def test_view_load_recover(self):
    """
    Tests loading a VM that failed to deploy back into the create view
    for editing.
    """
    # BUG FIX: this test referenced the bare names ``cluster``, ``vm``,
    # ``superuser``, ``vm_admin``, ``vm_modify`` and ``cluster_admin``,
    # none of which exist in this scope, so it crashed with NameError.
    # They are fixture attributes on ``self`` (as used by the sibling
    # copy of this test).
    url = '/cluster/%s/%s/recover/'
    args = (self.cluster.slug, self.vm.hostname)

    # vm with no template should redirect
    self.assert_200(url, args, [self.superuser],
                    template='ganeti/virtual_machine/detail.html',
                    follow=True)

    # Give the VM a template so the recover view can load it.
    template = VirtualMachineTemplate()
    template.save()
    self.vm.template = template
    self.vm.save()

    self.assert_standard_fails(url, args)
    users = [self.superuser, self.vm_admin, self.vm_modify,
             self.cluster_admin]
    self.assert_200(url, args, users,
                    template='ganeti/virtual_machine/create.html')
def instance_to_template(vm, name):
    """
    Create, save, and return a VM template representing all of the
    information in the VM instance.

    The name is given to the template to distinguish it from other
    templates.
    """
    template = VirtualMachineTemplate()

    # Basic stuff first.
    template.template_name = name
    template.description = ""
    template.cluster = vm.cluster
    template.start = vm.info["admin_state"]
    template.disk_template = vm.info["disk_template"]
    template.os = vm.operating_system

    # Backend parameters.
    template.vcpus = vm.virtual_cpus
    template.memory = vm.ram
    if has_balloonmem(vm.cluster):
        template.minmem = vm.minram
    template.disks = [{"size": size} for size in vm.info["disk.sizes"]]

    hvparams = vm.info["hvparams"]
    template.disk_type = hvparams["disk_type"]
    template.nics = [{"mode": mode, "link": link}
                     for mode, link in zip(vm.info["nic.modes"],
                                           vm.info["nic.links"])]
    template.nic_type = hvparams["nic_type"]

    # Hypervisor parameters.
    template.kernel_path = hvparams["kernel_path"]
    template.root_path = hvparams["root_path"]
    template.serial_console = hvparams["serial_console"]
    template.boot_order = hvparams["boot_order"]
    template.cdrom_image_path = hvparams["cdrom_image_path"]
    # BUG FIX: cdrom2_image_path only exists on clusters that support a
    # second cdrom (see the has_cdrom2() guard in the create view, #11655);
    # the unconditional lookup raised KeyError on older clusters.  Leave
    # the model default in place when the key is absent.
    if "cdrom2_image_path" in hvparams:
        template.cdrom2_image_path = hvparams["cdrom2_image_path"]

    template.save()
    return template
def test_view_create_recover(self):
    """
    Test the create view when recovering a failed vm

    Verifies:
        * vm can be successfully edited and created
        * vm object is reused
        * template object is reused
        * can only recover a vm in the failure state
        * owner cannot be changed (security)
        * editing user is not granted perms (security)
    """
    url = '/vm/add/'
    args = tuple()
    fail_template = 'ganeti/virtual_machine/create.html'
    success_template = 'ganeti/virtual_machine/create_status.html'

    template = VirtualMachineTemplate()
    template.save()

    # create a failed vm
    failed_vm, cluster2 = self.create_virtual_machine(
        self.cluster, 'failed.example.com')
    failed_vm.owner = self.user.get_profile()
    failed_vm.template = template
    failed_vm.save()
    self.vm.rapi.GetJobStatus.response = JOB_RUNNING

    data = dict(
        cluster=self.cluster.id,
        start=True,
        owner=self.user.get_profile().id,  #XXX remove this
        hostname=failed_vm.hostname,
        disk_template='plain',
        disk_count=1,
        disk_size_0=1000,
        memory=256,
        vcpus=2,
        root_path='/',
        nic_type='paravirtual',
        disk_type='paravirtual',
        nic_count=1,
        nic_link_0='br43',
        nic_mode_0='routed',
        boot_order='disk',
        os='image+ubuntu-lucid',
        pnode=self.cluster.nodes.all()[0],
        snode=self.cluster.nodes.all()[1])

    errors = [
        # attempt to recover vm that hasn't failed
        {'hostname': self.vm.hostname},
        # attempt to change owner
        {'hostname': failed_vm.hostname, 'owner': self.user1.pk},
    ]
    self.assert_view_values(url, args, data, errors, fail_template)

    #noinspection PyUnusedLocal
    def tests(user, response):
        created_vm = VirtualMachine.objects.get(pk=failed_vm.pk)
        self.assertEqual(template.pk, created_vm.template_id)
        self.assertNotEqual(None, created_vm.last_job_id)

    users = [self.superuser]
    self.assert_200(url, args, users, success_template, data=data,
                    method='post', tests=tests, follow=True)
def create(request, cluster_slug=None): """ Create a new instance Store in DB and Create on given cluster """ user = request.user if not(user.is_superuser or user.has_any_perms(Cluster, ['admin', 'create_vm'])): raise Http403( _('You do not have permission to create virtual machines')) if cluster_slug is not None: cluster = get_object_or_404(Cluster, slug=cluster_slug) else: cluster = None if request.method == 'POST': form = NewVirtualMachineForm(user, request.POST) if form.is_valid(): data = form.cleaned_data start = data.get('start') no_install = data.get('no_install') owner = data.get('owner') grantee = data.get('grantee') cluster = data.get('cluster') hostname = data.get('hostname') disk_template = data.get('disk_template') # Default to not pass in pnode and snode # since these will be set if the form is correct pnode = None snode = None os = data.get('os') name_check = data.get('name_check') iallocator = data.get('iallocator') # Hidden fields iallocator_hostname = None if 'iallocator_hostname' in data: iallocator_hostname = data.get('iallocator_hostname') # BEPARAMS vcpus = data.get('vcpus') disks = data.get('disks') disk_size = data.get('disk_size') nics = data.get('nics') memory = data.get('memory') # If iallocator was not checked do not pass in the iallocator # name. If iallocator was checked don't pass snode,pnode. 
if not iallocator: iallocator_hostname = None pnode = data.get('pnode') # If drbd is being used assign the secondary node if disk_template == 'drbd' and pnode is not None: snode = data.get('snode') # Create dictionary of only parameters supposed to be in hvparams hv = data.get('hypervisor') hvparams = {} hvparam_fields = () if hv == 'xen-pvm': hvparam_fields = ('kernel_path', 'root_path') elif hv == 'xen-hvm': hvparam_fields = ( 'boot_order', 'disk_type', 'nic_type', 'cdrom_image_path', ) elif hv == 'kvm': hvparam_fields = [ 'kernel_path', 'root_path', 'serial_console', 'boot_order', 'disk_type', 'cdrom_image_path', 'nic_type', ] # Check before adding cdrom2; see #11655. if has_cdrom2(cluster): hvparam_fields.append('cdrom2_image_path') # Force cdrom disk type to IDE; see #9297. hvparams['cdrom_disk_type'] = 'ide' for field in hvparam_fields: hvparams[field] = data[field] # XXX attempt to load the virtual machine. This ensure that if # there was a previous vm with the same hostname, but had not # successfully been deleted, then it will be deleted now try: VirtualMachine.objects.get(cluster=cluster, hostname=hostname) except VirtualMachine.DoesNotExist: pass try: job_id = cluster.rapi.CreateInstance('create', hostname, disk_template, disks,nics, no_install=no_install, start=start, os=os, pnode=pnode, snode=snode, name_check=name_check, ip_check=name_check, iallocator=iallocator_hostname, hypervisor=hv, hvparams=hvparams, beparams={"memory": memory, "vcpus":vcpus}) except GanetiApiError, e: msg = '%s: %s' % (_('Error creating virtual machine on this cluster'),e) form._errors["cluster"] = form.error_class([msg]) else: # Check for a vm recovery, If it is not found then if 'vm_recovery' in data: vm = data['vm_recovery'] vm_template = vm.template else: vm_template = VirtualMachineTemplate() vm = VirtualMachine(owner=owner) vm.cluster = cluster vm.hostname = hostname vm.ram = memory vm.virtual_cpus = vcpus vm.disk_size = disk_size # save temporary template # XXX copy each 
property in data. Avoids errors from properties # that don't exist on the model for k,v in data.items(): setattr(vm_template, k, v) vm_template.save() vm.template = vm_template vm.ignore_cache = True # Do a dance to get the VM and the job referencing each other. vm.save() job = Job.objects.create(job_id=job_id, obj=vm, cluster=cluster) job.save() vm.last_job = job vm.save() # grant admin permissions to the owner. Only do this for new # VMs. otherwise we run the risk of granting perms to a # different owner. We should be preventing that elsewhere, but # lets be extra careful since this check is cheap. if 'vm_recovery' in data: log_action('VM_RECOVER', user, vm, job) else: grantee.grant('admin', vm) log_action('CREATE', user, vm) return HttpResponseRedirect( reverse('instance-detail', args=[cluster.slug, vm.hostname])) cluster_defaults = {} if 'cluster' in request.POST and request.POST['cluster'] != '': try: cluster = Cluster.objects.get(pk=request.POST['cluster']) if cluster.info: cluster_defaults = cluster_default_info(cluster) except Cluster.DoesNotExist: pass
def test_view_create_recover(self):
    """
    Test the create view when recovering a failed vm

    Verifies:
        * vm can be successfully edited and created
        * vm object is reused
        * template object is reused
        * can only recover a vm in the failure state
        * owner cannot be changed (security)
        * editing user is not granted perms (security)
    """
    url = '/vm/add/'
    args = tuple()
    fail_template = 'ganeti/virtual_machine/create.html'
    success_template = 'ganeti/virtual_machine/create_status.html'

    template = VirtualMachineTemplate()
    template.save()

    # create a failed vm
    failed_vm, cluster2 = self.create_virtual_machine(
        self.cluster, 'failed.example.com')
    failed_vm.owner = self.user.get_profile()
    failed_vm.template = template
    failed_vm.save()
    self.vm.rapi.GetJobStatus.response = JOB_RUNNING

    data = {
        'cluster': self.cluster.id,
        'start': True,
        'owner': self.user.get_profile().id,  #XXX remove this
        'hostname': failed_vm.hostname,
        'disk_template': 'plain',
        'disk_count': 1,
        'disk_size_0': 1000,
        'memory': 256,
        'vcpus': 2,
        'root_path': '/',
        'nic_type': 'paravirtual',
        'disk_type': 'paravirtual',
        'nic_count': 1,
        'nic_link_0': 'br43',
        'nic_mode_0': 'routed',
        'boot_order': 'disk',
        'os': 'image+ubuntu-lucid',
        'pnode': self.cluster.nodes.all()[0],
        'snode': self.cluster.nodes.all()[1],
    }

    errors = [
        # attempt to recover vm that hasn't failed
        {'hostname': self.vm.hostname},
        # attempt to change owner
        {'hostname': failed_vm.hostname, 'owner': self.user1.pk},
    ]
    self.assert_view_values(url, args, data, errors, fail_template)

    #noinspection PyUnusedLocal
    def tests(user, response):
        recovered = VirtualMachine.objects.get(pk=failed_vm.pk)
        self.assertEqual(template.pk, recovered.template_id)
        self.assertNotEqual(None, recovered.last_job_id)

    users = [self.superuser]
    self.assert_200(url, args, users, success_template, data=data,
                    method='post', tests=tests, follow=True)