Example #1
    def setUp(self):
        models.client.GanetiRapiClient = RapiProxy

        # Cluster
        cluster = Cluster(hostname='test.cluster.gwm',
                          slug='test',
                          username='******',
                          password='******')
        #cluster.info = INFO
        cluster.save()

        # Template
        template_data = dict(
            template_name='new.vm.template',
            description='A new template.',
            cluster=cluster.id,
            start=True,
            name_check=True,
            disk_template='plain',
            disk_count=0,
            memory=256,
            vcpus=2,
            root_path='/',
            kernel_path='',
            cdrom_image_path='',
            serial_console=False,
            nic_type='paravirtual',
            disk_type='paravirtual',
            nic_count=0,
            boot_order='disk',
            os='image+ubuntu-lucid',
        )
        data = template_data.copy()
        data['cluster'] = cluster
        del data['disk_count']
        del data['nic_count']
        template = VirtualMachineTemplate(**data)
        template.save()

        # Template Fields
        fields = vars(template).keys()

        # Users
        self.create_users([
            ('superuser', {
                'is_superuser': True
            }),
            'cluster_admin',
        ])
        self.cluster_admin.grant('admin', cluster)

        self.users = [self.superuser, self.cluster_admin]
        self.template = template
        self.cluster = cluster
        self.template_data = template_data
        self.template_fields = fields
Example #2
    def setUp(self):
        models.client.GanetiRapiClient = RapiProxy

        # Cluster
        cluster = Cluster(hostname='test.cluster.gwm', slug='test',
                          username='******', password='******')
        #cluster.info = INFO
        cluster.save()


        # Template
        template_data = dict(
            template_name='new.vm.template',
            description='A new template.',
            cluster=cluster.id,
            start=True,
            name_check=True,
            disk_template='plain',
            disk_count=0,
            memory=256,
            vcpus=2,
            root_path='/',
            kernel_path='',
            cdrom_image_path='',
            serial_console=False,
            nic_type='paravirtual',
            disk_type='paravirtual',
            nic_count=0,
            boot_order='disk',
            os='image+ubuntu-lucid',
        )
        data = template_data.copy()
        data['cluster'] = cluster
        del data['disk_count']
        del data['nic_count']
        template = VirtualMachineTemplate(**data)
        template.save()

        # Template Fields
        fields = vars(template).keys()

        # Users
        self.create_users([
            ('superuser', {'is_superuser':True}),
            'cluster_admin',
            ])
        self.cluster_admin.grant('admin', cluster)

        self.users = [self.superuser, self.cluster_admin]
        self.template = template
        self.cluster = cluster
        self.template_data = template_data
        self.template_fields = fields
Example #3
def get_used_resources(cluster_user):
    """ help function for querying resources used for a given cluster_user """
    resources = {}
    owned_vms = cluster_user.virtual_machines.all()
    used = cluster_user.used_resources()
    clusters = cluster_user.permissable.get_objects_any_perms(Cluster)
    quotas = Cluster.get_quotas(clusters, cluster_user)

    for cluster, quota in quotas.items():
        resources[cluster] = {
            "used": used.pop(cluster.id) if cluster.id in used else USED_NOTHING,
            "set": quota
        }
        resources[cluster]["total"] = owned_vms.filter(cluster=cluster).count()
        resources[cluster]["running"] = owned_vms.filter(cluster=cluster, \
                                                    status="running").count()

    # add any clusters that have used resources but no perms (and thus no quota)
    # since we know they don't have a custom quota just add the default quota
    if used:
        for cluster in Cluster.objects.filter(pk__in=used):
            resources[cluster] = {"used":used[cluster.id],
                                  "set":cluster.get_default_quota()}
            resources[cluster]["total"] = owned_vms.filter(cluster=cluster).count()
            resources[cluster]["running"] = owned_vms.filter(cluster=cluster, \
                                                    status="running").count()

    return resources
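
A hypothetical usage sketch, not part of the original module: it renders the per-cluster report that get_used_resources() builds. Only cluster.hostname and the four keys set above ("used", "set", "total", "running") are relied on.

# Hypothetical usage (illustrative only): print one line per cluster from
# the report returned by get_used_resources().
def print_resource_report(cluster_user):
    for cluster, info in get_used_resources(cluster_user).items():
        print '%s: %s of %s VMs running, quota=%s, used=%s' % (
            cluster.hostname, info['running'], info['total'],
            info['set'], info['used'])
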
Example #5
    def setUp(self):
        self.tearDown()
        models.client.GanetiRapiClient = RapiProxy

        cluster = Cluster(hostname='test.cluster',
                          slug='test',
                          username='******',
                          password='******')
        cluster.id = 23  # XXX MySQL DB does not reset auto-increment IDs when an object is removed
        cluster.save()
        cluster.sync_nodes()

        template = VirtualMachineTemplate(template_name="Template1",
                                          cluster=cluster)
        template.disks = [{'size': 500}]
        template.nics = [{'mode': 'bridged', 'link': ''}]
        template.save()

        instance = VirtualMachine(hostname='new.vm.hostname', cluster=cluster)
        instance.info = INSTANCE
        instance.disks = []
        instance.nics = []
        instance.save()

        # Users
        self.create_users([
            ('superuser', {
                'is_superuser': True
            }),
            'cluster_admin',
            'create_vm',
            'unauthorized',
        ])
        self.cluster_admin.grant('admin', cluster)
        self.create_vm.grant('create_vm', cluster)

        self.create_template_data = dict(
            cluster=cluster.pk,
            template_name='foo_bar',
            memory=512,
            disk_template='plain',
            disk_count=0,
            nic_count=0,
        )

        self.cluster = cluster
        self.template = template
        self.instance = instance
        self.c = Client()
Example #6
    def update_cluster(self, info, data, callback):
        """
        updates an individual Cluster; this is the actual work function

        @param info - info from ganeti
        @param data - data from database
        @param callback - callback fired when method is complete.
        """
        mtime = data['mtime']
        if not mtime or mtime < info['mtime']:
            print '    Cluster (updated) : %(hostname)s' % data
            #print '        %s :: %s' % (mtime, datetime.fromtimestamp(info['mtime']))
            # only update the whole object if it is new or modified.
            #
            parsed = Cluster.parse_persistent_info(info)
            Cluster.objects.filter(pk=data['id']) \
                .update(serialized_info=cPickle.dumps(info), **parsed)
        callback(data['id'])
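
For illustration only, a hedged sketch of how a refresh pass might drive update_cluster(). The values() row shape, the get_rapi() lookup, and the GetInfo() call are assumptions here, not taken from this example.

    # Hypothetical driver (illustrative only): fetch fresh info for every
    # cluster, hand it to update_cluster() along with the stored row, and
    # collect the ids signalled through the callback.
    def refresh_clusters(self):
        updated = []
        for data in Cluster.objects.all().values('id', 'hostname', 'mtime'):
            cluster = Cluster.objects.get(pk=data['id'])
            info = get_rapi(cluster.hash, cluster).GetInfo()  # assumed calls
            self.update_cluster(info, data, updated.append)
        return updated
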
Example #7
    def setUp(self):
        self.tearDown()
        models.client.GanetiRapiClient = RapiProxy

        cluster = Cluster(hostname='test.cluster', slug='test',
                          username='******', password='******')
        cluster.id = 23  # XXX MySQL DB does not reset auto-increment
                         # IDs when an object is removed
        cluster.save()
        cluster.sync_nodes()

        template = VirtualMachineTemplate(template_name="Template1",
                                          cluster=cluster)
        template.disks = [{'size': 500}]
        template.nics = [{'mode': 'bridged', 'link': ''}]
        template.save()

        instance = VirtualMachine(hostname='new.vm.hostname', cluster=cluster)
        instance.info = INSTANCE
        instance.disks = []
        instance.nics = []
        instance.save()

        # Users
        self.create_users([
            ('superuser', {'is_superuser': True}),
            'cluster_admin',
            'create_vm',
            'unauthorized',
        ])
        self.cluster_admin.grant('admin', cluster)
        self.create_vm.grant('create_vm', cluster)

        self.create_template_data = dict(
            cluster=cluster.pk,
            template_name='foo_bar',
            memory=512,
            disk_template='plain',
            disk_count=0,
            nic_count=0,
        )

        self.cluster = cluster
        self.template = template
        self.instance = instance
        self.c = Client()
Example #8
    def setUp(self):
        self.tearDown()
        self.cluster = Cluster(hostname="ganeti.osuosl.test")
        self.cluster.save()
Example #9
class TestRapiCache(TestCase):
    def setUp(self):
        self.tearDown()
        self.cluster = Cluster(hostname="ganeti.osuosl.test")
        self.cluster.save()

    def tearDown(self):
        clear_rapi_cache()
        Cluster.objects.all().delete()

    def test_get_with_cluster(self):
        """
        Test getting a new rapi for a cluster
        
        Verifies:
            * rapi is returned
        """
        cluster = self.cluster
        rapi = get_rapi(cluster.hash, cluster)
        self.assertTrue(rapi)
        self.assertTrue(isinstance(rapi, (client.GanetiRapiClient,)))

    def test_get_with_id(self):
        """
        Test getting a new rapi for a cluster by cluster ID
        Verifies:
            * rapi is returned
        """
        cluster = self.cluster
        rapi = get_rapi(cluster.hash, cluster.id)
        self.assertTrue(rapi)
        self.assertTrue(isinstance(rapi, (client.GanetiRapiClient,)))

    def test_get_cached_client(self):
        """
        Test getting a cached rapi
        
        Verifies:
            * rapi returned is the same as the cached rapi
        """
        cluster = self.cluster
        rapi = get_rapi(cluster.hash, cluster.id)
        self.assertTrue(rapi)
        self.assertTrue(isinstance(rapi, (client.GanetiRapiClient,)))

        cached_rapi = get_rapi(cluster.hash, cluster)
        self.assertEqual(rapi, cached_rapi)

        cached_rapi = get_rapi(cluster.hash, cluster.id)
        self.assertEqual(rapi, cached_rapi)

    def test_get_changed_hash(self):
        """
        Test getting rapi after hash has changed
        
        Verifies:
            * a new rapi is created and returned
            * old rapi is removed from cache
            * reverse cache is now pointing to new hash
        """
        cluster = self.cluster
        old_hash = cluster.hash
        rapi = get_rapi(cluster.hash, cluster)

        cluster.hostname = "a.different.hostname"
        cluster.save()
        self.assertNotEqual(old_hash, cluster.hash, "new hash was not created")
        new_rapi = get_rapi(cluster.hash, cluster)
        self.assertTrue(rapi)
        self.assertTrue(isinstance(rapi, (client.GanetiRapiClient,)))
        self.assertNotEqual(rapi, new_rapi)
        self.assertFalse(old_hash in RAPI_CACHE, "old rapi client was not removed")

    def test_stale_hash(self):
        """
        Tests an object with a stale hash
        
        Verifies:
            * a rapi is created and stored using the current credentials
        """
        cluster = self.cluster
        stale_cluster = Cluster.objects.get(id=cluster.id)
        cluster.hostname = "a.different.hostname"
        cluster.save()
        clear_rapi_cache()
        stale_rapi = get_rapi(stale_cluster.hash, stale_cluster)
        self.assertTrue(stale_rapi)
        self.assertTrue(isinstance(stale_rapi, (client.GanetiRapiClient,)))

        fresh_rapi = get_rapi(cluster.hash, cluster)
        self.assertEqual(stale_rapi, fresh_rapi)

    def test_stale_hash_new_already_created(self):
        """
        Tests an object with a stale hash, but the new client was already
        created
        
        Verifies:
            * Existing client, with current hash, is returned
        """
        cluster = self.cluster
        stale_cluster = Cluster.objects.get(id=cluster.id)
        cluster.hostname = "a.different.hostname"
        cluster.save()
        clear_rapi_cache()
        fresh_rapi = get_rapi(cluster.hash, cluster)
        stale_rapi = get_rapi(stale_cluster.hash, stale_cluster)
        self.assertTrue(stale_rapi)
        self.assertTrue(isinstance(stale_rapi, (client.GanetiRapiClient,)))
        self.assertEqual(stale_rapi, fresh_rapi)
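
The cache tests above imply that get_rapi() keeps clients in a hash-keyed cache with a reverse id-to-hash map, so a stale hash still resolves to a client built from the cluster's current credentials. Below is a minimal sketch of that idea; it is not the project's actual implementation, and CLIENT_CACHE, HASH_BY_ID and the *_sketch names are illustrative.

# Hypothetical sketch of the caching behaviour exercised by TestRapiCache.
# Cluster and client refer to the same objects imported by the tests above.
CLIENT_CACHE = {}   # cluster hash -> GanetiRapiClient
HASH_BY_ID = {}     # cluster id -> hash currently cached (reverse cache)

def get_rapi_sketch(hash, cluster):
    # fast path: a client is already cached under the requested hash
    if hash in CLIENT_CACHE:
        return CLIENT_CACHE[hash]

    # cache miss: the hash may be stale, so re-read the cluster's current
    # credentials from the database and key the cache on its current hash
    cluster_id = cluster.id if isinstance(cluster, Cluster) else cluster
    cluster = Cluster.objects.get(id=cluster_id)

    rapi = CLIENT_CACHE.get(cluster.hash)
    if rapi is None:
        rapi = client.GanetiRapiClient(cluster.hostname,
                                       username=cluster.username,
                                       password=cluster.password)
        CLIENT_CACHE[cluster.hash] = rapi

    # maintain the reverse cache, evicting any client stored under an
    # outdated hash for this cluster
    old_hash = HASH_BY_ID.get(cluster_id)
    if old_hash and old_hash != cluster.hash:
        CLIENT_CACHE.pop(old_hash, None)
    HASH_BY_ID[cluster_id] = cluster.hash
    return rapi

def clear_rapi_cache_sketch():
    # empty both the client cache and the reverse id-to-hash cache
    CLIENT_CACHE.clear()
    HASH_BY_ID.clear()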