def test_inc_dec_get_counter(self):
    """ACCOUNT COUNTER (CORE): Increase, decrease and get counter """
    account_update(once=True)
    rse_id = get_rse_id(rse='MOCK', **self.vo)
    account = InternalAccount('jdoe', **self.vo)
    # Start from a pristine counter for this (rse, account) pair.
    account_counter.del_counter(rse_id=rse_id, account=account)
    account_counter.add_counter(rse_id=rse_id, account=account)
    usage = get_usage(rse_id=rse_id, account=account)
    del usage['updated_at']
    assert_equal(usage, {'files': 0, 'bytes': 0})

    files_expected, bytes_expected = 0, 0
    # Alternate batches of increments and decrements; after every single
    # step the aggregated usage must match the locally tracked totals.
    for repetitions, direction in ((10, +1), (4, -1), (5, +1), (8, -1)):
        apply_delta = account_counter.increase if direction > 0 else account_counter.decrease
        for _ in range(repetitions):
            apply_delta(rse_id=rse_id, account=account, files=1, bytes=2.147e+9)
            account_update(once=True)
            files_expected += direction
            bytes_expected += direction * 2.147e+9
            usage = get_usage(rse_id=rse_id, account=account)
            del usage['updated_at']
            assert_equal(usage, {'files': files_expected, 'bytes': bytes_expected})
def test_2(self):
    """Local quota not big enough for the request -> selector raises."""
    requested_copies = 2
    candidates = [self.rse_1, self.rse_2]
    # Grant a 10-byte local quota on MOCK1, then immediately consume all of it.
    set_local_account_limit(account=self.account, rse_id=self.mock1_id, bytes=10)
    increase(self.mock1_id, self.account, 10, 10)
    update_account_counter(account=self.account, rse_id=self.mock1_id)
    # No local space left anywhere, so building the selector must fail.
    with assert_raises(InsufficientAccountLimit):
        RSESelector(self.account, candidates, None, requested_copies)
def test_5(self):
    """Enough RSEs and local quota, but exhausted global quota -> only one RSE kept."""
    requested_copies = 1
    candidates = [self.rse_1, self.rse_2]
    # Grant a 10-byte global quota on rse_1 and consume all of it...
    set_global_account_limit(account=self.account, rse_expression=self.rse_1_name, bytes=10)
    increase(self.mock1_id, self.account, 10, 10)
    update_account_counter(account=self.account, rse_id=self.mock1_id)
    # ...while both RSEs keep a sufficient local quota.
    set_local_account_limit(account=self.account, rse_id=self.mock2_id, bytes=20)
    set_local_account_limit(account=self.account, rse_id=self.mock1_id, bytes=20)
    selector = RSESelector(self.account, candidates, None, requested_copies)
    # Only the RSE with remaining global quota survives the filtering.
    assert_equal(len(selector.rses), 1)
def test_3(self):
    """Global quota not big enough for the request -> selector raises."""
    requested_copies = 2
    candidates = [self.rse_1, self.rse_2]
    # Local quota is ample, but the 10-byte global quota on rse_1
    # gets fully consumed right away.
    set_local_account_limit(account=self.account, rse_id=self.mock1_id, bytes=20)
    set_global_account_limit(account=self.account, rse_expression=self.rse_1_name, bytes=10)
    increase(self.mock1_id, self.account, 10, 10)
    update_account_counter(account=self.account, rse_id=self.mock1_id)
    with pytest.raises(InsufficientAccountLimit):
        RSESelector(self.account, candidates, None, requested_copies)
def test_inc_dec_get_counter(self):
    """ACCOUNT COUNTER (CORE): Increase, decrease and get counter """
    account_update(once=True)
    rse_id = get_rse('MOCK').id
    account = 'jdoe'
    # Reset the counter so the test starts from a known zero state.
    account_counter.del_counter(rse_id=rse_id, account=account)
    account_counter.add_counter(rse_id=rse_id, account=account)
    cnt = account_counter.get_counter(rse_id=rse_id, account=account)
    del cnt['updated_at']
    assert_equal(cnt, {'files': 0, 'bytes': 0})

    # NOTE: range() replaces the Python-2-only xrange() (NameError on
    # Python 3; behaviour is identical here), and the running byte total
    # is named 'total' instead of shadowing the builtin 'sum'.
    count, total = 0, 0
    for _ in range(10):
        account_counter.increase(rse_id=rse_id, account=account, files=1, bytes=2.147e+9)
        account_update(once=True)
        count += 1
        total += 2.147e+9
        cnt = account_counter.get_counter(rse_id=rse_id, account=account)
        del cnt['updated_at']
        assert_equal(cnt, {'files': count, 'bytes': total})

    for _ in range(4):
        account_counter.decrease(rse_id=rse_id, account=account, files=1, bytes=2.147e+9)
        account_update(once=True)
        count -= 1
        total -= 2.147e+9
        cnt = account_counter.get_counter(rse_id=rse_id, account=account)
        del cnt['updated_at']
        assert_equal(cnt, {'files': count, 'bytes': total})

    for _ in range(5):
        account_counter.increase(rse_id=rse_id, account=account, files=1, bytes=2.147e+9)
        account_update(once=True)
        count += 1
        total += 2.147e+9
        cnt = account_counter.get_counter(rse_id=rse_id, account=account)
        del cnt['updated_at']
        assert_equal(cnt, {'files': count, 'bytes': total})

    for _ in range(8):
        account_counter.decrease(rse_id=rse_id, account=account, files=1, bytes=2.147e+9)
        account_update(once=True)
        count -= 1
        total -= 2.147e+9
        cnt = account_counter.get_counter(rse_id=rse_id, account=account)
        del cnt['updated_at']
        assert_equal(cnt, {'files': count, 'bytes': total})
def set_metadata_bulk(self, scope, name, meta, recursive=False, session=None):
    """
    Set several metadata key/value pairs on one data identifier in a single call.

    Keys with dedicated columns and bookkeeping ('lifetime', 'guid', 'events',
    'adler32', 'bytes') are handled specially; all remaining keys are applied
    as one bulk column update on the DID row.

    :param scope: The scope of the data identifier.
    :param name: The name of the data identifier.
    :param meta: Dictionary of metadata key/value pairs to set.
    :param recursive: If True, also apply the non-special keys to every child of the DID.
    :param session: The database session in use.

    :raises DataIdentifierNotFound: if the DID does not exist.
    :raises InvalidValueForKey: if 'lifetime' cannot be cast to float.
    :raises InvalidMetadata: if a remaining key is not an accepted DID column.
    :raises UnsupportedOperation: if a key cannot be updated for this DID.
    """
    # Fail early if the DID itself does not exist.
    did_query = session.query(models.DataIdentifier).with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').filter_by(scope=scope, name=name)
    if did_query.one_or_none() is None:
        raise exception.DataIdentifierNotFound("Data identifier '%s:%s' not found" % (scope, name))

    # Keys without special handling; applied in one bulk update at the end.
    remainder = {}
    for key, value in meta.items():
        if key == 'lifetime':
            # 'lifetime' is stored as an absolute expiry timestamp; None clears it.
            try:
                expired_at = None
                if value is not None:
                    expired_at = datetime.utcnow() + timedelta(seconds=float(value))
                rowcount = did_query.update({'expired_at': expired_at}, synchronize_session='fetch')
            except TypeError as error:
                raise exception.InvalidValueForKey(error)
            if not rowcount:
                # check for did presence
                raise exception.UnsupportedOperation('%s for %s:%s cannot be updated' % (key, scope, name))
        elif key in ['guid', 'events']:
            # These columns exist only on FILE DIDs; a zero rowcount means the
            # DID is not a file (its presence was already verified above).
            rowcount = did_query.filter_by(did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            if not rowcount:
                # check for did presence
                raise exception.UnsupportedOperation('%s for %s:%s cannot be updated' % (key, scope, name))
            # Mirror the new value on the parent/child association rows.
            session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            if key == 'events':
                # Re-aggregate the total event count of every parent of this file.
                for parent_scope, parent_name in session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).filter_by(child_scope=scope, child_name=name):
                    events = session.query(func.sum(models.DataIdentifierAssociation.events)).filter_by(scope=parent_scope, name=parent_name).one()[0]
                    session.query(models.DataIdentifier).filter_by(scope=parent_scope, name=parent_name).update({'events': events}, synchronize_session=False)
        elif key == 'adler32':
            # Checksum change: propagate to associations, pending requests and replicas.
            rowcount = did_query.filter_by(did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            if not rowcount:
                # check for did presence
                raise exception.UnsupportedOperation('%s for %s:%s cannot be updated' % (key, scope, name))
            session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)
            session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)
        elif key == 'bytes':
            # Size change: update associations and requests, then fix up locks,
            # replicas, the affected account/RSE counters, and parent totals.
            rowcount = did_query.filter_by(did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            if not rowcount:
                # check for did presence
                raise exception.UnsupportedOperation('%s for %s:%s cannot be updated' % (key, scope, name))
            session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)

            # Replace each replica lock's byte count and shift the owning
            # account's counter from the old size to the new one.
            for account, bytes, rse_id, rule_id in session.query(models.ReplicaLock.account, models.ReplicaLock.bytes, models.ReplicaLock.rse_id, models.ReplicaLock.rule_id).filter_by(scope=scope, name=name):
                session.query(models.ReplicaLock).filter_by(scope=scope, name=name, rule_id=rule_id, rse_id=rse_id).update({key: value}, synchronize_session=False)
                account_counter.decrease(rse_id=rse_id, account=account, files=1, bytes=bytes, session=session)
                account_counter.increase(rse_id=rse_id, account=account, files=1, bytes=value, session=session)

            # Same old-size/new-size swap for each replica and its RSE counter.
            for bytes, rse_id in session.query(models.RSEFileAssociation.bytes, models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name):
                session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).update({key: value}, synchronize_session=False)
                rse_counter.decrease(rse_id=rse_id, files=1, bytes=bytes, session=session)
                rse_counter.increase(rse_id=rse_id, files=1, bytes=value, session=session)

            # Re-aggregate length/bytes/events on every parent and keep the
            # corresponding dataset locks in sync.
            for parent_scope, parent_name in session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).filter_by(child_scope=scope, child_name=name):
                values = {}
                values['length'], values['bytes'], values['events'] = session.query(func.count(models.DataIdentifierAssociation.scope),
                                                                                    func.sum(models.DataIdentifierAssociation.bytes),
                                                                                    func.sum(models.DataIdentifierAssociation.events)).filter_by(scope=parent_scope, name=parent_name).one()
                session.query(models.DataIdentifier).filter_by(scope=parent_scope, name=parent_name).update(values, synchronize_session=False)
                session.query(models.DatasetLock).filter_by(scope=parent_scope, name=parent_name).update({'length': values['length'], 'bytes': values['bytes']}, synchronize_session=False)
        else:
            remainder[key] = value

    if remainder:
        # Apply all generic keys in one column update; invalid columns raise.
        try:
            rowcount = did_query.update(remainder, synchronize_session='fetch')
        except CompileError as error:
            raise exception.InvalidMetadata(error)
        except InvalidRequestError:
            raise exception.InvalidMetadata("Some of the keys are not accepted: " + str(list(remainder.keys())))
        if not rowcount:
            raise exception.UnsupportedOperation('Some of the keys for %s:%s cannot be updated: %s' % (scope, name, str(list(remainder.keys()))))

        # propagate metadata updates to child content
        if recursive:
            content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name)
            content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').filter_by(scope=scope, name=name)
            for child_scope, child_name in content_query:
                try:
                    child_did_query = session.query(models.DataIdentifier).with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').filter_by(scope=child_scope, name=child_name)
                    child_did_query.update(remainder, synchronize_session='fetch')
                except CompileError as error:
                    raise exception.InvalidMetadata(error)
                except InvalidRequestError:
                    raise exception.InvalidMetadata("Some of the keys are not accepted recursively: " + str(list(remainder.keys())))
def set_metadata(self, scope, name, key, value, recursive=False, session=None):
    """
    Add metadata to data identifier.

    Keys with dedicated columns and bookkeeping ('lifetime', 'guid', 'events',
    'adler32', 'bytes') are handled specially; any other key is applied as a
    plain column update on the DID row.

    :param scope: The scope name.
    :param name: The data identifier name.
    :param key: the key.
    :param value: the value.
    :param recursive: Option to propagate the metadata change to content.
    :param session: The database session in use.

    :raises DataIdentifierNotFound: if the DID does not exist.
    :raises InvalidValueForKey: if 'lifetime' cannot be cast to float.
    :raises InvalidMetadata: if the key is not an accepted DID column.
    :raises UnsupportedOperation: if the key cannot be updated for this DID.
    """
    try:
        # Ensure the DID exists before touching anything; .one() raises
        # NoResultFound otherwise. The row is also a truthy placeholder for
        # 'rowcount' in case no branch below reassigns it.
        rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name).\
            with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').one()
    except NoResultFound:
        raise exception.DataIdentifierNotFound("Data identifier '%s:%s' not found" % (scope, name))

    if key == 'lifetime':
        # 'lifetime' is stored as an absolute expiry timestamp; None clears it.
        try:
            expired_at = None
            if value is not None:
                expired_at = datetime.utcnow() + timedelta(seconds=float(value))
            rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name).update({'expired_at': expired_at}, synchronize_session='fetch')
        except TypeError as error:
            raise exception.InvalidValueForKey(error)
    elif key in ['guid', 'events']:
        # These columns exist only on FILE DIDs; zero rowcount is caught by
        # the final check at the end of the method.
        rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name, did_type=DIDType.FILE).update({key: value}, synchronize_session=False)

        # Mirror the new value on the parent/child association rows.
        session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
        if key == 'events':
            # Re-aggregate the total event count of every parent of this file.
            for parent_scope, parent_name in session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).filter_by(child_scope=scope, child_name=name):
                events = session.query(func.sum(models.DataIdentifierAssociation.events)).filter_by(scope=parent_scope, name=parent_name).one()[0]
                session.query(models.DataIdentifier).filter_by(scope=parent_scope, name=parent_name).update({'events': events}, synchronize_session=False)
    elif key == 'adler32':
        # Checksum change: propagate to associations, pending requests and replicas.
        rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name, did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
        session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
        session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)
        session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)
    elif key == 'bytes':
        # Size change: update associations and requests, then fix up locks,
        # replicas, the affected account/RSE counters, and parent totals.
        rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name, did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
        session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
        session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)

        # Replace each replica lock's byte count and shift the owning
        # account's counter from the old size to the new one.
        for account, bytes, rse_id, rule_id in session.query(models.ReplicaLock.account, models.ReplicaLock.bytes, models.ReplicaLock.rse_id, models.ReplicaLock.rule_id).filter_by(scope=scope, name=name):
            session.query(models.ReplicaLock).filter_by(scope=scope, name=name, rule_id=rule_id, rse_id=rse_id).update({key: value}, synchronize_session=False)
            account_counter.decrease(rse_id=rse_id, account=account, files=1, bytes=bytes, session=session)
            account_counter.increase(rse_id=rse_id, account=account, files=1, bytes=value, session=session)

        # Same old-size/new-size swap for each replica and its RSE counter.
        for bytes, rse_id in session.query(models.RSEFileAssociation.bytes, models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name):
            session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).update({key: value}, synchronize_session=False)
            rse_counter.decrease(rse_id=rse_id, files=1, bytes=bytes, session=session)
            rse_counter.increase(rse_id=rse_id, files=1, bytes=value, session=session)

        # Re-aggregate length/bytes/events on every parent and keep the
        # corresponding dataset locks in sync.
        for parent_scope, parent_name in session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).filter_by(child_scope=scope, child_name=name):
            values = {}
            values['length'], values['bytes'], values['events'] = session.query(func.count(models.DataIdentifierAssociation.scope),
                                                                                func.sum(models.DataIdentifierAssociation.bytes),
                                                                                func.sum(models.DataIdentifierAssociation.events)).filter_by(scope=parent_scope, name=parent_name).one()
            session.query(models.DataIdentifier).filter_by(scope=parent_scope, name=parent_name).update(values, synchronize_session=False)
            session.query(models.DatasetLock).filter_by(scope=parent_scope, name=parent_name).update({'length': values['length'], 'bytes': values['bytes']}, synchronize_session=False)
    else:
        # Generic key: plain column update; invalid columns raise.
        try:
            rowcount = session.query(models.DataIdentifier).\
                with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
                filter_by(scope=scope, name=name).\
                update({key: value}, synchronize_session='fetch')
        except CompileError as error:
            raise exception.InvalidMetadata(error)
        except InvalidRequestError:
            raise exception.InvalidMetadata("Key %s is not accepted" % key)

        # propagate metadata updates to child content
        if recursive:
            content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\
                with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\
                filter_by(scope=scope, name=name)
            for child_scope, child_name in content_query:
                try:
                    session.query(models.DataIdentifier).\
                        with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
                        filter_by(scope=child_scope, name=child_name).\
                        update({key: value}, synchronize_session='fetch')
                except CompileError as error:
                    raise exception.InvalidMetadata(error)
                except InvalidRequestError:
                    raise exception.InvalidMetadata("Key %s is not accepted" % key)

    if not rowcount:
        # check for did presence
        raise exception.UnsupportedOperation('%(key)s for %(scope)s:%(name)s cannot be updated' % locals())
def test_api_rse(self):
    """ RSE (API): Test external representation of RSEs """
    # Single-RSE lookup returns the external name and id.
    out = api_rse.get_rse(self.rse_name, **self.vo)
    assert out['rse'] == self.rse_name
    assert out['id'] == self.rse_id

    # Listing under the other VO must expose rse3/rse4 with external names.
    out = api_rse.list_rses(**self.new_vo)
    out = list(out)
    assert 0 != len(out)
    rse_ids = [rse['id'] for rse in out]
    assert self.rse3_id in rse_ids
    assert self.rse4_id in rse_ids
    for rse in out:
        assert 'rse' in rse
        if rse['id'] == self.rse3_id:
            assert rse['rse'] == self.rse3_name
        elif rse['id'] == self.rse4_id:
            assert rse['rse'] == self.rse4_name

    # Attribute filtering: a freshly generated key only matches our RSE.
    key = "KEY_" + generate_uuid()
    api_rse.add_rse_attribute(self.rse_name, key, 1, issuer='root', **self.vo)
    out = api_rse.get_rses_with_attribute(key)
    out = list(out)
    assert 0 != len(out)
    for rse in out:
        assert rse['rse'] == self.rse_name

    out = api_rse.get_rse_protocols(self.rse_name, issuer='root', **self.vo)
    assert out['rse'] == self.rse_name

    # add some account and RSE counters
    # NOTE(review): assumes the 'MOCK4' RSE pre-exists for this VO — created
    # by the test setup elsewhere in the file.
    rse_mock = 'MOCK4'
    rse_mock_id = get_rse_id(rse_mock, **self.vo)
    account_counter.del_counter(rse_id=rse_mock_id, account=self.account)
    account_counter.add_counter(rse_id=rse_mock_id, account=self.account)
    account_counter.increase(rse_id=rse_mock_id, account=self.account, files=1, bytes=10)
    account_counter.update_account_counter(self.account, rse_mock_id)
    did = 'file_' + generate_uuid()
    add_did(self.scope_name, did, 'DATASET', 'root', account=self.account_name, rse=rse_mock, **self.vo)
    abacus_rse.run(once=True)

    # Per-account usage must report the external account name only —
    # in multi-VO mode the internal representation must never leak out.
    out = api_rse.get_rse_usage(rse_mock, per_account=True, issuer='root', **self.vo)
    assert rse_mock_id in [o['rse_id'] for o in out]
    for usage in out:
        if usage['rse_id'] == rse_mock_id:
            assert usage['rse'] == rse_mock
            accounts = [u['account'] for u in usage['account_usages']]
            assert self.account_name in accounts
            if self.multi_vo:
                assert self.account.internal not in accounts

    # clean up files
    cleaner.run(once=True)
    if self.multi_vo:
        # Restrict the reaper to this VO's MOCK4 via an RSE expression.
        reaper.run(once=True, include_rses='vo=%s&(%s)' % (self.vo['vo'], rse_mock), greedy=True)
    else:
        reaper.run(once=True, include_rses=rse_mock, greedy=True)
    abacus_rse.run(once=True)

    # Expression parsing returns external RSE names, never internal ids.
    out = api_rse.parse_rse_expression('%s|%s' % (self.rse_name, self.rse2_name), **self.vo)
    assert self.rse_name in out
    assert self.rse2_name in out
    assert self.rse_id not in out
    assert self.rse2_id not in out
if not rowcount: raise exception.UnsupportedOperation('%(key)s for %(scope)s:%(name)s cannot be updated' % locals()) session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False) session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False) elif key == 'bytes': rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name, did_type=DIDType.FILE).update({key: value}, synchronize_session=False) if not rowcount: raise exception.UnsupportedOperation('%(key)s for %(scope)s:%(name)s cannot be updated' % locals()) session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False) session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False) for account, bytes, rse_id, rule_id in session.query(models.ReplicaLock.account, models.ReplicaLock.bytes, models.ReplicaLock.rse_id).filter_by(scope=scope, name=name): session.query(models.ReplicaLock).filter_by(scope=scope, name=name, rule_id=rule_id, rse_id=rse_id).update({key: value}, synchronize_session=False) account_counter.decrease(rse_id=rse_id, account=account, files=1, bytes=bytes, session=session) account_counter.increase(rse_id=rse_id, account=account, files=1, bytes=value, session=session) for bytes, rse_id in session.query(models.RSEFileAssociation.bytes, models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name): session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).update({key: value}, synchronize_session=False) rse_counter.decrease(rse_id=rse_id, files=1, bytes=bytes, session=session) rse_counter.increase(rse_id=rse_id, files=1, bytes=value, session=session) for parent_scope, parent_name in session.query(models.DataIdentifierAssociation.scope, 
models.DataIdentifierAssociation.name).filter_by(child_scope=scope, child_name=name): values = {} values['length'], values['bytes'], values['events'] = session.query(func.count(models.DataIdentifierAssociation.scope), func.sum(models.DataIdentifierAssociation.bytes), func.sum(models.DataIdentifierAssociation.events)).filter_by(scope=parent_scope, name=parent_name).one() session.query(models.DataIdentifier).filter_by(scope=parent_scope, name=parent_name).update(values, synchronize_session=False) session.query(models.DatasetLock).filter_by(scope=parent_scope, name=parent_name).update({'length': values['length'], 'bytes': values['bytes']}, synchronize_session=False) else: