Exemplo n.º 1
0
 def list(self, vpoolguid=None, storagerouterguid=None, query=None):
     """
     Overview of all vDisks
     :param vpoolguid: Guid of the vPool to retrieve its disks
     :type vpoolguid: str
     :param storagerouterguid: Guid of the StorageRouter to retrieve its disks
     :type storagerouterguid: str
     :param query: A query to be executed if required
     :type query: DataQuery
     :return: List of vDisks matching the parameters specified
     :rtype: list[ovs.dal.hybrids.vdisk.VDisk]
     """
     if vpoolguid is not None:
         # Scope the overview to a single vPool
         vpool = VPool(vpoolguid)
         vdisks = vpool.vdisks
     elif storagerouterguid is not None:
         # Scope the overview to the vDisks served by one StorageRouter
         storagerouter = StorageRouter(storagerouterguid)
         vdisks = DataList(
             VDisk, {
                 'type':
                 DataList.where_operator.AND,
                 'items': [('guid', DataList.operator.IN,
                            storagerouter.vdisks_guids)]
             })
     else:
         vdisks = VDiskList.get_vdisks()
     if query is not None:
         # Use a set: O(1) membership tests instead of scanning a guid list
         # for every vDisk (was O(n*m))
         query_vdisk_guids = set(DataList(VDisk, query).guids)
         vdisks = [
             vdisk for vdisk in vdisks if vdisk.guid in query_vdisk_guids
         ]
     return vdisks
    def list(self, discover=False, ip=None, node_id=None):
        """
        Lists all available ALBA Nodes
        :param discover: If True and IP provided, return list of single ALBA node, If True and no IP provided, return all ALBA nodes else return modeled ALBA nodes
        :param ip: IP of ALBA node to retrieve
        :param node_id: ID of the ALBA node
        :raises RuntimeError: on conflicting parameters, invalid credentials or a node id mismatch
        """
        # Parameter sanity: ip/node_id only make sense in discovery mode, and must be given together
        if discover is False and (ip is not None or node_id is not None):
            raise RuntimeError('Discover is mutually exclusive with IP and nodeID')
        if (ip is None and node_id is not None) or (ip is not None and node_id is None):
            raise RuntimeError('Both IP and nodeID need to be specified')

        # Default mode: return the ALBA nodes already present in the model
        if discover is False:
            return AlbaNodeList.get_albanodes()

        if ip is not None:
            # Discovery of one specific node: build a volatile (non-persisted) AlbaNode
            # with credentials read from Etcd, then validate it by querying its metadata
            node = AlbaNode(volatile=True)
            node.ip = ip
            node.type = 'ASD'
            node.node_id = node_id
            node.port = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main|port'.format(node_id))
            node.username = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main|username'.format(node_id))
            node.password = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main|password'.format(node_id))
            data = node.client.get_metadata()
            if data['_success'] is False and data['_error'] == 'Invalid credentials':
                raise RuntimeError('Invalid credentials')
            if data['node_id'] != node_id:
                raise RuntimeError('Unexpected node identifier. {0} vs {1}'.format(data['node_id'], node_id))
            # Hand-craft a pre-executed DataList wrapping just this node so the
            # caller can consume it like a regular query result
            node_list = DataList(AlbaNode, {})
            node_list._executed = True
            node_list._guids = [node.guid]
            node_list._objects = {node.guid: node}
            node_list._data = {node.guid: {'guid': node.guid, 'data': node._data}}
            return node_list

        # Discovery of all nodes registered in Etcd that are not yet modeled
        nodes = {}
        model_node_ids = [node.node_id for node in AlbaNodeList.get_albanodes()]
        found_node_ids = []
        asd_node_ids = []
        if EtcdConfiguration.dir_exists('/ovs/alba/asdnodes'):
            asd_node_ids = EtcdConfiguration.list('/ovs/alba/asdnodes')

        for node_id in asd_node_ids:
            node = AlbaNode(volatile=True)
            node.type = 'ASD'
            node.node_id = node_id
            node.ip = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main|ip'.format(node_id))
            node.port = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main|port'.format(node_id))
            node.username = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main|username'.format(node_id))
            node.password = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main|password'.format(node_id))
            # Skip nodes that are already modeled or already discovered in this run
            if node.node_id not in model_node_ids and node.node_id not in found_node_ids:
                nodes[node.guid] = node
                found_node_ids.append(node.node_id)
        # Same pre-executed DataList trick as above, now for all discovered nodes
        node_list = DataList(AlbaNode, {})
        node_list._executed = True
        node_list._guids = nodes.keys()
        node_list._objects = nodes
        node_list._data = dict([(node.guid, {'guid': node.guid, 'data': node._data}) for node in nodes.values()])
        return node_list
Exemplo n.º 3
0
 def test_primarykeys(self):
     """
     Validates whether the primary keys are kept in sync
     """
     disk = TestDisk()
     disk.name = 'disk'

     def _assert_pk_count(expected, message):
         # Re-read the primary key index and verify its size
         pks = DataList.get_pks(disk._namespace, disk._name)
         self.assertEqual(len(pks), expected, message.format(len(pks)))

     _assert_pk_count(0, 'There should be no primary keys ({0})')
     disk.save()
     _assert_pk_count(1, 'There should be one primary key ({0})')
     disk.delete()
     _assert_pk_count(0, 'There should be no primary keys ({0})')
Exemplo n.º 4
0
 def _get_list_property(self, attribute):
     """
     Getter for the list property
     It will execute the related query every time to return a list of hybrid objects that
     refer to this object. The resulting data will be stored or merged into the cached list
     preserving as much already loaded objects as possible
     :param attribute: Name of the relation attribute to resolve
     :raises InvalidRelationException: when a one-to-one relation yields more than one element
     """
     # Relation metadata was registered in self._objects at load time
     info = self._objects[attribute]['info']
     remote_class = Descriptor().load(info['class']).get_object()
     remote_key = info['key']
     # Query the objects on the remote side that point back to this guid
     datalist = DataList.get_relation_set(remote_class, remote_key,
                                          self.__class__, attribute,
                                          self.guid)
     if self._objects[attribute]['data'] is None:
         # First access: build the cached DataObjectList
         self._objects[attribute]['data'] = DataObjectList(
             datalist.data, remote_class)
     else:
         # Subsequent access: refresh the cache, keeping already loaded objects
         self._objects[attribute]['data'].update(datalist.data)
     if info['list'] is True:
         return self._objects[attribute]['data']
     else:
         # One-to-one relation: unwrap to a single object (or None)
         data = self._objects[attribute]['data']
         if len(data) > 1:
             raise InvalidRelationException(
                 'More than one element found in {0}'.format(attribute))
         return data[0] if len(data) == 1 else None
Exemplo n.º 5
0
 def list(self, vpoolguid=None, storagerouterguid=None):
     """
     Overview of all vDisks
     :param vpoolguid: Guid of the vPool to retrieve its disks
     :type vpoolguid: str
     :param storagerouterguid: Guid of the StorageRouter to retrieve its disks
     :type storagerouterguid: str
     :return: List of vDisks matching the parameters specified
     :rtype: list[ovs.dal.hybrids.vdisk.VDisk]
     """
     # Guard clauses: most specific scope first
     if vpoolguid is not None:
         return VPool(vpoolguid).vdisks
     if storagerouterguid is not None:
         storagerouter = StorageRouter(storagerouterguid)
         guid_filter = [('guid', DataList.operator.IN,
                         storagerouter.vdisks_guids)]
         return DataList(VDisk, {'type': DataList.where_operator.AND,
                                 'items': guid_filter})
     return VDiskList.get_vdisks()
Exemplo n.º 6
0
 def get_vpools():
     """
     Retrieve every modeled VPool
     """
     match_all = {'type': DataList.where_operator.AND, 'items': []}
     return DataList(VPool, match_all)
Exemplo n.º 7
0
 def list(self, query=None):
     """
     Overview of all backend types
     :param query: Optional JSON-encoded filter query
     """
     if query is None:
         return BackendTypeList.get_backend_types()
     # Decode the client-supplied query and run it directly
     return DataList(BackendType, json.loads(query))
Exemplo n.º 8
0
 def get_licenses():
     """
     Retrieve every modeled License
     """
     match_all = {'type': DataList.where_operator.AND, 'items': []}
     return DataList(License, match_all)
 def get_albanodes():
     """
     Retrieve every modeled AlbaNode
     """
     match_all = {'type': DataList.where_operator.AND, 'items': []}
     return DataList(AlbaNode, match_all)
Exemplo n.º 10
0
 def get_services():
     """
     Retrieve all services, regardless of their type
     """
     match_all = {'type': DataList.where_operator.AND, 'items': []}
     return DataList(Service, match_all)
Exemplo n.º 11
0
 def get_users():
     """
     Retrieve every modeled User
     """
     match_all = {'type': DataList.where_operator.AND, 'items': []}
     return DataList(User, match_all)
Exemplo n.º 12
0
 def get_partitions():
     """
     Retrieve every modeled DiskPartition
     """
     match_all = {'type': DataList.where_operator.AND, 'items': []}
     return DataList(DiskPartition, match_all)
Exemplo n.º 13
0
 def get_mgmtcenters():
     """
     Retrieve every modeled MgmtCenter
     """
     match_all = {'type': DataList.where_operator.AND, 'items': []}
     return DataList(MgmtCenter, match_all)
Exemplo n.º 14
0
 def get_vdisks():
     """
     Retrieve every modeled VDisk
     """
     match_all = {'type': DataList.where_operator.AND, 'items': []}
     return DataList(VDisk, match_all)
Exemplo n.º 15
0
 def get_backends():
     """
     Retrieve every modeled Backend
     """
     match_all = {'type': DataList.where_operator.AND, 'items': []}
     return DataList(Backend, match_all)
Exemplo n.º 16
0
 def get_pmachines():
     """
     Retrieve every modeled PMachine
     """
     match_all = {'type': DataList.where_operator.AND, 'items': []}
     return DataList(PMachine, match_all)
Exemplo n.º 17
0
 def get_servicetypes():
     """
     Retrieve every modeled ServiceType
     """
     match_all = {'type': DataList.where_operator.AND, 'items': []}
     return DataList(ServiceType, match_all)
Exemplo n.º 18
0
 def get_failure_domains():
     """
     Retrieve every modeled FailureDomain
     """
     match_all = {'type': DataList.where_operator.AND, 'items': []}
     return DataList(FailureDomain, match_all)
Exemplo n.º 19
0
 def get_storagerouters():
     """
     Retrieve every modeled StorageRouter
     """
     match_all = {'type': DataList.where_operator.AND, 'items': []}
     return DataList(StorageRouter, match_all)
Exemplo n.º 20
0
 def get_storagedrivers_by_storagerouter(machineguid):
     """
     Retrieve every StorageDriver linked to the given StorageRouter guid
     :param machineguid: Guid of the StorageRouter
     """
     guid_query = {'object': StorageDriver,
                   'data': DataList.select.GUIDS,
                   'query': {'type': DataList.where_operator.AND,
                             'items': [('storagerouter_guid', DataList.operator.EQUALS, machineguid)]}}
     return DataObjectList(DataList(guid_query).data, StorageDriver)
Exemplo n.º 21
0
 def get_storagedrivers():
     """
     Retrieve every modeled StorageDriver
     """
     guid_query = {'object': StorageDriver,
                   'data': DataList.select.GUIDS,
                   'query': {'type': DataList.where_operator.AND,
                             'items': []}}
     return DataObjectList(DataList(guid_query).data, StorageDriver)
Exemplo n.º 22
0
 def get_vtemplates():
     """
     Retrieve every VMachine that is flagged as a template
     """
     template_filter = [('is_vtemplate', DataList.operator.EQUALS, True)]
     return DataList(VMachine, {'type': DataList.where_operator.AND,
                                'items': template_filter})
Exemplo n.º 23
0
 def get_vtemplates():
     """
     Retrieve every VMachine that is flagged as a template
     """
     guid_query = {'object': VMachine,
                   'data': DataList.select.GUIDS,
                   'query': {'type': DataList.where_operator.AND,
                             'items': [('is_vtemplate', DataList.operator.EQUALS, True)]}}
     return DataObjectList(DataList(guid_query).data, VMachine)
Exemplo n.º 24
0
 def get_alba_node_clusters():
     """
     Returns a list of all AlbaNodeClusters
     :rtype: DataList[ovs.dal.hybrids.albanodecluster.AlbaNodeCluster]
     """
     return DataList(AlbaNodeCluster, {
         'type': DataList.where_operator.AND,
         'items': []
     })
Exemplo n.º 25
0
 def get_groups():
     """
     Retrieve every modeled Group
     """
     guid_query = {'object': Group,
                   'data': DataList.select.GUIDS,
                   'query': {'type': DataList.where_operator.AND,
                             'items': []}}
     return DataObjectList(DataList(guid_query).data, Group)
Exemplo n.º 26
0
 def get_without_vmachine():
     """
     Retrieve every vDisk that is not linked to a vMachine
     """
     unlinked_filter = [('vmachine_guid', DataList.operator.EQUALS, None)]
     return DataList(VDisk, {'type': DataList.where_operator.AND,
                             'items': unlinked_filter})
Exemplo n.º 27
0
 def get_in_volume_ids(volume_ids):
     """
     Retrieve every vDisk whose volume_id appears in the given list
     :param volume_ids: Volume identifiers to match against
     """
     id_filter = [('volume_id', DataList.operator.IN, volume_ids)]
     return DataList(VDisk, {'type': DataList.where_operator.AND,
                             'items': id_filter})
Exemplo n.º 28
0
 def get_masters():
     """
     Retrieve every StorageRouter whose node_type is 'MASTER'
     """
     master_filter = [('node_type', DataList.operator.EQUALS, 'MASTER')]
     return DataList(StorageRouter, {'type': DataList.where_operator.AND,
                                     'items': master_filter})
Exemplo n.º 29
0
 def get_partitions():
     """
     Retrieve every modeled DiskPartition
     """
     guid_query = {'object': DiskPartition,
                   'data': DataList.select.GUIDS,
                   'query': {'type': DataList.where_operator.AND,
                             'items': []}}
     return DataObjectList(DataList(guid_query).data, DiskPartition)
Exemplo n.º 30
0
 def get_slaves():
     """
     Retrieve every SLAVE StorageRouter
     """
     # NOTE(review): node_type 'EXTRA' appears to denote non-master (slave)
     # StorageRouters in this model — confirm against the StorageRouter hybrid
     slave_filter = [('node_type', DataList.operator.EQUALS, 'EXTRA')]
     return DataList(StorageRouter, {'type': DataList.where_operator.AND,
                                     'items': slave_filter})
Exemplo n.º 31
0
 def get_vmachines():
     """
     Retrieve every modeled VMachine
     """
     guid_query = {'object': VMachine,
                   'data': DataList.select.GUIDS,
                   'query': {'type': DataList.where_operator.AND,
                             'items': []}}
     return DataObjectList(DataList(guid_query).data, VMachine)
Exemplo n.º 32
0
 def get_brandings():
     """
     Retrieve every modeled Branding
     """
     guid_query = {'object': Branding,
                   'data': DataList.select.GUIDS,
                   'query': {'type': DataList.where_operator.AND,
                             'items': []}}
     return DataObjectList(DataList(guid_query).data, Branding)
Exemplo n.º 33
0
 def _clean_all(self):
     """
     Removes every test machine (together with its disks) and every loose test disk
     """
     # Machines first: deleting a machine also requires removing its disks
     machine = TestMachine()
     for guid in DataList.get_pks(machine._namespace, machine._classname):
         try:
             stored_machine = TestMachine(guid)
             for stored_disk in stored_machine.disks:
                 stored_disk.delete()
             stored_machine.delete()
         except (ObjectNotFoundException, ValueError):
             # Already gone or stale primary key; nothing to clean
             pass
     # Then any disks that were not attached to a machine
     disk = TestDisk()
     for guid in DataList.get_pks(disk._namespace, disk._classname):
         try:
             TestDisk(guid).delete()
         except (ObjectNotFoundException, ValueError):
             pass
Exemplo n.º 34
0
 def _get_list_property(self, attribute):
     """
     Getter for the list property
     It will execute the related query every time to return a list of hybrid objects that
     refer to this object. The resulting data will be stored or merged into the cached list
     preserving as much already loaded objects as possible
     :param attribute: Name of the relation attribute to resolve
     """
     # Relation metadata was registered in self._objects at load time
     info = self._objects[attribute]['info']
     remote_class = Descriptor().load(info['class']).get_object()
     remote_key   = info['key']
     # Query the objects on the remote side that point back to this guid
     datalist = DataList.get_relation_set(remote_class, remote_key, self.__class__, attribute, self.guid)
     if self._objects[attribute]['data'] is None:
         # First access: build the cached DataObjectList
         self._objects[attribute]['data'] = DataObjectList(datalist.data, remote_class)
     else:
         # Subsequent access: merge fresh guids into the cache, keeping loaded objects
         self._objects[attribute]['data'].merge(datalist.data)
     if info['list'] is True:
         return self._objects[attribute]['data']
     else:
         # One-to-one relation: unwrap to a single object (or None)
         # NOTE(review): unlike the variant elsewhere in this file, multiple
         # matches are silently ignored here instead of raising
         data = self._objects[attribute]['data']
         return data[0] if len(data) == 1 else None
Exemplo n.º 35
0
    def test_lotsofobjects(self):
        """
        A test creating, linking and querying a lot of objects
        """
        # NOTE(review): Python 2 code (print statements, xrange); throughput is
        # printed for inspection, not asserted. 'dps' = disks per second.
        print ''
        print 'cleaning up'
        self._clean_all()
        print 'start test'
        tstart = time.time()
        # Workload size can be overridden via class attributes before running
        if getattr(LotsOfObjects, 'amount_of_machines', None) is None:
            LotsOfObjects.amount_of_machines = 50
        if getattr(LotsOfObjects, 'amount_of_disks', None) is None:
            LotsOfObjects.amount_of_disks = 5
        load_data = True
        mguids = []
        if load_data:
            # Phase 1: create the machines, each with its linked disks
            print '\nstart loading data'
            start = time.time()
            runtimes = []
            for i in xrange(0, int(LotsOfObjects.amount_of_machines)):
                mstart = time.time()
                machine = TestMachine()
                machine.name = 'machine_{0}'.format(i)
                machine.save()
                mguids.append(machine.guid)
                for ii in xrange(0, int(LotsOfObjects.amount_of_disks)):
                    disk = TestDisk()
                    disk.name = 'disk_{0}_{1}'.format(i, ii)
                    disk.size = ii * 100
                    disk.machine = machine
                    disk.save()
                avgitemspersec = ((i + 1) * LotsOfObjects.amount_of_disks) / (time.time() - start)
                itemspersec = LotsOfObjects.amount_of_disks / (time.time() - mstart)
                runtimes.append(itemspersec)
                LotsOfObjects._print_progress('* machine {0}/{1} (run: {2} dps, avg: {3} dps)'.format(i + 1, int(LotsOfObjects.amount_of_machines), round(itemspersec, 2), round(avgitemspersec, 2)))
            runtimes.sort()
            # runtimes[1]/runtimes[-2] drop the single fastest/slowest sample;
            # assumes at least 3 runs were recorded
            print '\nloading done ({0}s). min: {1} dps, max: {2} dps'.format(round(time.time() - tstart, 2), round(runtimes[1], 2), round(runtimes[-2], 2))

        test_queries = True
        if test_queries:
            # Phase 2: every machine must see all of its disks through the relation
            print '\nstart queries'
            start = time.time()
            runtimes = []
            for i in xrange(0, int(LotsOfObjects.amount_of_machines)):
                mstart = time.time()
                machine = TestMachine(mguids[i])
                assert len(machine.disks) == LotsOfObjects.amount_of_disks, 'Not all disks were retrieved ({0})'.format(len(machine.disks))
                avgitemspersec = ((i + 1) * LotsOfObjects.amount_of_disks) / (time.time() - start)
                itemspersec = LotsOfObjects.amount_of_disks / (time.time() - mstart)
                runtimes.append(itemspersec)
                LotsOfObjects._print_progress('* machine {0}/{1} (run: {2} dps, avg: {3} dps)'.format(i + 1, int(LotsOfObjects.amount_of_machines), round(itemspersec, 2), round(avgitemspersec, 2)))
            runtimes.sort()
            print '\ncompleted ({0}s). min: {1} dps, max: {2} dps'.format(round(time.time() - tstart, 2), round(runtimes[1], 2), round(runtimes[-2], 2))

            # Phase 3: full property query over all disks; the GT/LT bounds cut
            # 3 disks per machine out of the result set
            print '\nstart full query on disk property'
            start = time.time()
            dlist = DataList(TestDisk, {'type': DataList.where_operator.AND,
                                        'items': [('size', DataList.operator.GT, 100),
                                                  ('size', DataList.operator.LT, (LotsOfObjects.amount_of_disks - 1) * 100)]})
            amount = len(dlist)
            assert amount == (LotsOfObjects.amount_of_disks - 3) * LotsOfObjects.amount_of_machines, 'Incorrect amount of disks. Found {0} instead of {1}'.format(amount, int((LotsOfObjects.amount_of_disks - 3) * LotsOfObjects.amount_of_machines))
            seconds_passed = (time.time() - start)
            print 'completed ({0}s) in {1} seconds (avg: {2} dps)'.format(round(time.time() - tstart, 2), round(seconds_passed, 2), round(LotsOfObjects.amount_of_machines * LotsOfObjects.amount_of_disks / seconds_passed, 2))

            # Phase 4: force-load all disk objects so the next query hits warm caches
            print '\nloading disks (all)'
            start = time.time()
            for i in xrange(0, int(LotsOfObjects.amount_of_machines)):
                machine = TestMachine(mguids[i])
                _ = [_ for _ in machine.disks]
            seconds_passed = (time.time() - start)
            print 'completed ({0}s) in {1} seconds (avg: {2} dps)'.format(round(time.time() - tstart, 2), round(seconds_passed, 2), round(LotsOfObjects.amount_of_machines * LotsOfObjects.amount_of_disks / seconds_passed, 2))

            # Phase 5: same query again, after evicting the cached query result
            print '\nstart full query on disk property (using cached objects)'
            dlist._volatile.delete(dlist._key)
            start = time.time()
            dlist = DataList(TestDisk, {'type': DataList.where_operator.AND,
                                        'items': [('size', DataList.operator.GT, 100),
                                                  ('size', DataList.operator.LT, (LotsOfObjects.amount_of_disks - 1) * 100)]})
            amount = len(dlist)
            assert amount == (LotsOfObjects.amount_of_disks - 3) * LotsOfObjects.amount_of_machines, 'Incorrect amount of disks. Found {0} instead of {1}'.format(amount, int((LotsOfObjects.amount_of_disks - 3) * LotsOfObjects.amount_of_machines))
            seconds_passed = (time.time() - start)
            print 'completed ({0}s) in {1} seconds (avg: {2} dps)'.format(round(time.time() - tstart, 2), round(seconds_passed, 2), round(LotsOfObjects.amount_of_machines * LotsOfObjects.amount_of_disks / seconds_passed, 2))

            # Phase 6: sort by a stored property, then by a dynamic property
            print '\nstart property sort'
            dlist = DataList(TestDisk, {'type': DataList.where_operator.AND,
                                        'items': []})
            start = time.time()
            dlist.sort(key=lambda a: Toolbox.extract_key(a, 'size'))
            seconds_passed = (time.time() - start)
            print 'completed ({0}s) in {1} seconds (avg: {2} dps)'.format(round(time.time() - tstart, 2), round(seconds_passed, 2), round(LotsOfObjects.amount_of_machines * LotsOfObjects.amount_of_disks / seconds_passed, 2))

            print '\nstart dynamic sort'
            dlist._volatile.delete(dlist._key)
            dlist = DataList(TestDisk, {'type': DataList.where_operator.AND,
                                        'items': []})
            start = time.time()
            dlist.sort(key=lambda a: Toolbox.extract_key(a, 'predictable'))
            seconds_passed = (time.time() - start)
            print 'completed ({0}s) in {1} seconds (avg: {2} dps)'.format(round(time.time() - tstart, 2), round(seconds_passed, 2), round(LotsOfObjects.amount_of_machines * LotsOfObjects.amount_of_disks / seconds_passed, 2))

        clean_data = True
        if clean_data:
            # Phase 7: tear everything down again, with throughput reporting
            print '\ncleaning up'
            start = time.time()
            runtimes = []
            for i in xrange(0, int(LotsOfObjects.amount_of_machines)):
                mstart = time.time()
                machine = TestMachine(mguids[i])
                for disk in machine.disks:
                    disk.delete()
                machine.delete()
                avgitemspersec = ((i + 1) * LotsOfObjects.amount_of_disks) / (time.time() - start)
                itemspersec = LotsOfObjects.amount_of_disks / (time.time() - mstart)
                runtimes.append(itemspersec)
                LotsOfObjects._print_progress('* machine {0}/{1} (run: {2} dps, avg: {3} dps)'.format(i + 1, int(LotsOfObjects.amount_of_machines), round(itemspersec, 2), round(avgitemspersec, 2)))
            runtimes.sort()
            print '\ncompleted ({0}s). min: {1} dps, max: {2} dps'.format(round(time.time() - tstart, 2), round(runtimes[1], 2), round(runtimes[-2], 2))
Exemplo n.º 36
0
 def test_pk_stretching(self):
     """
     Validates whether the primary key lists scale correctly.
     * X entries will be added (e.g. 10000)
     * X/2 random entries will be deleted (5000)
     * X/2 entries will be added again (5000)
     * X entries will be removed (10000)
     No entries should be remaining
     """
     # NOTE(review): Python 2 code (print statements, xrange). 'ops' = operations/second.
     print ''
     print 'starting test'
     amount_of_objects = 10000  # Must be an even number!
     machine = TestMachine()
     runtimes = []
     # Phase 1: add X keys, verifying the index size after every addition
     start = time.time()
     keys = DataList._get_pks(machine._namespace, machine._name)
     self.assertEqual(len(list(keys)), 0, 'There should be no primary keys yet ({0})'.format(len(list(keys))))
     guids = []
     mstart = time.time()
     for i in xrange(0, amount_of_objects):
         guid = str(uuid.uuid4())
         guids.append(guid)
         DataList.add_pk(machine._namespace, machine._name, guid)
         keys = DataList._get_pks(machine._namespace, machine._name)
         self.assertEqual(len(list(keys)), len(guids), 'There should be {0} primary keys instead of {1}'.format(len(guids), len(list(keys))))
         if i % 100 == 99:
             # Report throughput every 100 operations
             avgitemspersec = (i + 1) / (time.time() - start)
             itemspersec = 100 / (time.time() - mstart)
             runtimes.append(itemspersec)
             self._print_progress('* adding object {0}/{1} (run: {2} ops, avg: {3} ops)'.format(i + 1, int(amount_of_objects), round(itemspersec, 2), round(avgitemspersec, 2)))
             mstart = time.time()
     print ''
     # Phase 2: delete X/2 randomly chosen keys
     amount_of_objects /= 2
     shuffle(guids)  # Make the test a bit more realistic
     guids_copy = guids[:]
     dstart = time.time()
     mstart = time.time()
     for i in xrange(0, amount_of_objects):
         guid = guids_copy[i]
         guids.remove(guid)
         DataList.delete_pk(machine._namespace, machine._name, guid)
         keys = DataList._get_pks(machine._namespace, machine._name)
         self.assertEqual(len(list(keys)), len(guids), 'There should be {0} primary keys instead of {1}'.format(len(guids), len(list(keys))))
         if i % 100 == 99:
             avgitemspersec = (i + 1) / (time.time() - dstart)
             itemspersec = 100 / (time.time() - mstart)
             runtimes.append(itemspersec)
             self._print_progress('* delete object {0}/{1} (run: {2} ops, avg: {3} ops)'.format(i + 1, int(amount_of_objects), round(itemspersec, 2), round(avgitemspersec, 2)))
             mstart = time.time()
     keys = DataList._get_pks(machine._namespace, machine._name)
     self.assertEqual(len(list(keys)), amount_of_objects, 'There should be {0} primary keys ({1})'.format(amount_of_objects, len(list(keys))))
     print ''
     # Phase 3: add X/2 fresh keys again
     sstart = time.time()
     mstart = time.time()
     for i in xrange(0, amount_of_objects):
         guid = str(uuid.uuid4())
         guids.append(guid)
         DataList.add_pk(machine._namespace, machine._name, guid)
         keys = DataList._get_pks(machine._namespace, machine._name)
         self.assertEqual(len(list(keys)), len(guids), 'There should be {0} primary keys instead of {1}'.format(len(guids), len(list(keys))))
         if i % 100 == 99:
             avgitemspersec = (i + 1) / (time.time() - sstart)
             itemspersec = 100 / (time.time() - mstart)
             runtimes.append(itemspersec)
             self._print_progress('* adding object {0}/{1} (run: {2} ops, avg: {3} ops)'.format(i + 1, int(amount_of_objects), round(itemspersec, 2), round(avgitemspersec, 2)))
             mstart = time.time()
     print ''
     # Phase 4: delete all remaining X keys; the index must end up empty
     amount_of_objects *= 2
     shuffle(guids)  # Make the test a bit more realistic
     guids_copy = guids[:]
     dstart = time.time()
     mstart = time.time()
     for i in xrange(0, amount_of_objects):
         guid = guids_copy[i]
         guids.remove(guid)
         DataList.delete_pk(machine._namespace, machine._name, guid)
         keys = DataList._get_pks(machine._namespace, machine._name)
         self.assertEqual(len(list(keys)), len(guids), 'There should be {0} primary keys instead of {1}'.format(len(guids), len(list(keys))))
         if i % 100 == 99:
             avgitemspersec = (i + 1) / (time.time() - dstart)
             itemspersec = 100 / (time.time() - mstart)
             runtimes.append(itemspersec)
             self._print_progress('* delete object {0}/{1} (run: {2} ops, avg: {3} ops)'.format(i + 1, int(amount_of_objects), round(itemspersec, 2), round(avgitemspersec, 2)))
             mstart = time.time()
     keys = DataList._get_pks(machine._namespace, machine._name)
     self.assertEqual(len(guids), 0, 'All guids should be removed. {0} left'.format(len(guids)))
     self.assertEqual(len(list(keys)), 0, 'There should be no primary keys ({0})'.format(len(list(keys))))
     seconds_passed = (time.time() - start)
     runtimes.sort()
     # runtimes[1]/runtimes[-2] drop the single fastest/slowest sample
     print '\ncompleted in {0} seconds (avg: {1} ops, min: {2} ops, max: {3} ops)'.format(round(seconds_passed, 2), round((amount_of_objects * 3) / seconds_passed, 2), round(runtimes[1], 2), round(runtimes[-2], 2))
Exemplo n.º 37
0
    def save(self, recursive=False, skip=None):
        """
        Save the object to the persistent backend and clear cache, making use
        of the specified conflict resolve settings.
        It will also invalidate certain caches if required. For example lists pointing towards this
        object
        """
        invalid_fields = []
        for prop in self._properties:
            if prop.mandatory is True and self._data[prop.name] is None:
                invalid_fields.append(prop.name)
        for relation in self._relations:
            if relation.mandatory is True and self._data[relation.name]['guid'] is None:
                invalid_fields.append(relation.name)
        if len(invalid_fields) > 0:
            raise MissingMandatoryFieldsException('Missing fields on {0}: {1}'.format(self._name, ', '.join(invalid_fields)))

        if recursive:
            # Save objects that point to us (e.g. disk.vmachine - if this is disk)
            for relation in self._relations:
                if relation.name != skip:  # disks will be skipped
                    item = getattr(self, relation.name)
                    if item is not None:
                        item.save(recursive=True, skip=relation.foreign_key)

            # Save object we point at (e.g. machine.disks - if this is machine)
            relations = RelationMapper.load_foreign_relations(self.__class__)
            if relations is not None:
                for key, info in relations.iteritems():
                    if key != skip:  # machine will be skipped
                        if info['list'] is True:
                            for item in getattr(self, key).iterloaded():
                                item.save(recursive=True, skip=info['key'])
                        else:
                            item = getattr(self, key)
                            if item is not None:
                                item.save(recursive=True, skip=info['key'])

        try:
            data = self._persistent.get(self._key)
        except KeyNotFoundException:
            if self._new:
                data = {}
            else:
                raise ObjectNotFoundException('{0} with guid \'{1}\' was deleted'.format(
                    self.__class__.__name__, self._guid
                ))
        changed_fields = []
        data_conflicts = []
        for attribute in self._data.keys():
            if self._data[attribute] != self._original[attribute]:
                # We changed this value
                changed_fields.append(attribute)
                if attribute in data and self._original[attribute] != data[attribute]:
                    # Some other process also wrote to the database
                    if self._datastore_wins is None:
                        # In case we didn't set a policy, we raise the conflicts
                        data_conflicts.append(attribute)
                    elif self._datastore_wins is False:
                        # If the datastore should not win, we just overwrite the data
                        data[attribute] = self._data[attribute]
                    # If the datastore should win, we discard/ignore our change
                else:
                    # Normal scenario, saving data
                    data[attribute] = self._data[attribute]
            elif attribute not in data:
                data[attribute] = self._data[attribute]
        if data_conflicts:
            raise ConcurrencyException('Got field conflicts while saving {0}. Conflicts: {1}'.format(
                self._name, ', '.join(data_conflicts)
            ))

        # Refresh internal data structure
        self._data = copy.deepcopy(data)

        # First, update reverse index
        try:
            self._mutex_reverseindex.acquire(60)
            for relation in self._relations:
                key = relation.name
                original_guid = self._original[key]['guid']
                new_guid = self._data[key]['guid']
                if original_guid != new_guid:
                    if relation.foreign_type is None:
                        classname = self.__class__.__name__.lower()
                    else:
                        classname = relation.foreign_type.__name__.lower()
                    if original_guid is not None:
                        reverse_key = 'ovs_reverseindex_{0}_{1}'.format(classname, original_guid)
                        reverse_index = self._volatile.get(reverse_key)
                        if reverse_index is not None:
                            if relation.foreign_key in reverse_index:
                                entries = reverse_index[relation.foreign_key]
                                if self.guid in entries:
                                    entries.remove(self.guid)
                                    reverse_index[relation.foreign_key] = entries
                                    self._volatile.set(reverse_key, reverse_index)
                    if new_guid is not None:
                        reverse_key = 'ovs_reverseindex_{0}_{1}'.format(classname, new_guid)
                        reverse_index = self._volatile.get(reverse_key)
                        if reverse_index is not None:
                            if relation.foreign_key in reverse_index:
                                entries = reverse_index[relation.foreign_key]
                                if self.guid not in entries:
                                    entries.append(self.guid)
                                    reverse_index[relation.foreign_key] = entries
                                    self._volatile.set(reverse_key, reverse_index)
                            else:
                                reverse_index[relation.foreign_key] = [self.guid]
                                self._volatile.set(reverse_key, reverse_index)
                        else:
                            reverse_index = {relation.foreign_key: [self.guid]}
                            self._volatile.set(reverse_key, reverse_index)
            reverse_key = 'ovs_reverseindex_{0}_{1}'.format(self._name, self.guid)
            reverse_index = self._volatile.get(reverse_key)
            if reverse_index is None:
                reverse_index = {}
                relations = RelationMapper.load_foreign_relations(self.__class__)
                if relations is not None:
                    for key, _ in relations.iteritems():
                        reverse_index[key] = []
                self._volatile.set(reverse_key, reverse_index)
        finally:
            self._mutex_reverseindex.release()
        # Second, invalidate property lists
        try:
            self._mutex_listcache.acquire(60)
            cache_key = '{0}_{1}'.format(DataList.cachelink, self._name)
            cache_list = Toolbox.try_get(cache_key, {})
            change = False
            for list_key in cache_list.keys():
                fields = cache_list[list_key]
                if ('__all' in fields and self._new) or list(set(fields) & set(changed_fields)):
                    change = True
                    self._volatile.delete(list_key)
                    del cache_list[list_key]
            if change is True:
                self._volatile.set(cache_key, cache_list)
                self._persistent.set(cache_key, cache_list)
        finally:
            self._mutex_listcache.release()

        # Save the data
        self._persistent.set(self._key, self._data)
        DataList.add_pk(self._namespace, self._name, self._guid)

        # Invalidate the cache
        self._volatile.delete(self._key)

        self._original = copy.deepcopy(self._data)
        self.dirty = False
        self._new = False
# Exemplo n.º 38
# 0
    def delete(self, abandon=False):
        """
        Delete the given object. It also invalidates certain lists
        :param abandon: If True, any object still linked to this one is detached
                        (its foreign key is set to None and it is re-saved) before
                        deletion. If False (default), a LinkedObjectException is
                        raised when linked objects still exist.
        :raises LinkedObjectException: When abandon is False and foreign relations
                                       still point at this object
        """
        # Check foreign relations: objects elsewhere that point at this object
        # must either be detached (abandon=True) or block the delete.
        relations = RelationMapper.load_foreign_relations(self.__class__)
        if relations is not None:
            for key, info in relations.iteritems():
                items = getattr(self, key)
                if info['list'] is True:
                    # 1-to-many relation: 'items' is a list of linked objects
                    if len(items) > 0:
                        if abandon is True:
                            # itersafe: iterate the linked items; objects that were
                            # deleted concurrently raise ObjectNotFoundException on
                            # save and are simply skipped.
                            for item in items.itersafe():
                                setattr(item, info['key'], None)
                                try:
                                    item.save()
                                except ObjectNotFoundException:
                                    pass
                        else:
                            raise LinkedObjectException('There are {0} items left in self.{1}'.format(len(items), key))
                elif items is not None:
                    # No list (so a 1-to-1 relation), so there should be an object, or None
                    item = items  # More clear naming
                    if abandon is True:
                        setattr(item, info['key'], None)
                        try:
                            item.save()
                        except ObjectNotFoundException:
                            pass
                    else:
                        raise LinkedObjectException('There is still an item linked in self.{0}'.format(key))

        # First, update reverse index
        # Remove this object's guid from the reverse index of every object it
        # points at, based on the last-saved state (self._original), then drop
        # this object's own reverse-index entry.
        try:
            self._mutex_reverseindex.acquire(60)
            for relation in self._relations:
                key = relation.name
                original_guid = self._original[key]['guid']
                if original_guid is not None:
                    if relation.foreign_type is None:
                        # Self-referencing relation: index lives under our own classname
                        classname = self.__class__.__name__.lower()
                    else:
                        classname = relation.foreign_type.__name__.lower()
                    reverse_key = 'ovs_reverseindex_{0}_{1}'.format(classname, original_guid)
                    reverse_index = self._volatile.get(reverse_key)
                    if reverse_index is not None:
                        if relation.foreign_key in reverse_index:
                            entries = reverse_index[relation.foreign_key]
                            if self.guid in entries:
                                entries.remove(self.guid)
                                reverse_index[relation.foreign_key] = entries
                                self._volatile.set(reverse_key, reverse_index)
            self._volatile.delete('ovs_reverseindex_{0}_{1}'.format(self._name, self.guid))
        finally:
            self._mutex_reverseindex.release()
        # Second, invalidate property lists
        # Any cached DataList result that covered all objects of this type
        # ('__all') becomes stale once an object disappears, so drop it from
        # both the volatile and persistent cache registries.
        try:
            self._mutex_listcache.acquire(60)
            cache_key = '{0}_{1}'.format(DataList.cachelink, self._name)
            cache_list = Toolbox.try_get(cache_key, {})
            change = False
            for list_key in cache_list.keys():
                fields = cache_list[list_key]
                if '__all' in fields:
                    change = True
                    self._volatile.delete(list_key)
                    del cache_list[list_key]
            if change is True:
                self._volatile.set(cache_key, cache_list)
                self._persistent.set(cache_key, cache_list)
        finally:
            self._mutex_listcache.release()

        # Delete the object out of the persistent store
        try:
            self._persistent.delete(self._key)
        except KeyNotFoundException:
            # Already gone (e.g. deleted by another process) - nothing to do
            pass

        # Delete the object and its properties out of the volatile store
        self.invalidate_dynamics()
        self._volatile.delete(self._key)
        DataList.delete_pk(self._namespace, self._name, self._guid)