def test_inc_dec_get_counter(self):
    """ RSE COUNTER (CORE): Increase, decrease and get counter """
    delta = 2.147e+9  # bytes added/removed per single operation

    def assert_counter(files_total, bytes_total):
        # Fetch the counter and compare, ignoring the volatile timestamp.
        cnt = rse_counter.get_counter(rse_id=rse_id)
        del cnt['updated_at']
        assert_equal(cnt, {'files': files_total, 'bytes': bytes_total})

    rse_id = get_rse_id(rse='MOCK', **self.vo)
    rse_update(once=True)
    # Start from a fresh, empty counter.
    rse_counter.del_counter(rse_id=rse_id)
    rse_counter.add_counter(rse_id=rse_id)
    assert_counter(0, 0)

    # Alternate increase/decrease phases (10 up, 4 down, 5 up, 8 down),
    # verifying the aggregated counter after every single step.
    files_total, bytes_total = 0, 0
    for n_steps, sign in ((10, 1), (4, -1), (5, 1), (8, -1)):
        for _ in range(n_steps):
            if sign > 0:
                rse_counter.increase(rse_id=rse_id, files=1, bytes=delta)
            else:
                rse_counter.decrease(rse_id=rse_id, files=1, bytes=delta)
            rse_update(once=True)
            files_total += sign
            bytes_total += sign * delta
            assert_counter(files_total, bytes_total)
def test_inc_dec_get_counter(self):
    """ RSE COUNTER (CORE): Increase, decrease and get counter """
    delta = 2.147e+9  # bytes added/removed per single operation

    def assert_counter(files_total, bytes_total):
        # Fetch the counter and compare, ignoring the volatile timestamp.
        cnt = rse_counter.get_counter(rse_id=rse_id)
        del cnt['updated_at']
        assert_equal(cnt, {'files': files_total, 'bytes': bytes_total})

    rse_id = get_rse('MOCK').id
    rse_update(once=True)
    # Start from a fresh, empty counter.
    rse_counter.del_counter(rse_id=rse_id)
    rse_counter.add_counter(rse_id=rse_id)
    assert_counter(0, 0)

    # Alternate increase/decrease phases (10 up, 4 down, 5 up, 8 down),
    # verifying the aggregated counter after every single step.
    # NOTE: range() replaces Python-2-only xrange() for py3 compatibility.
    files_total, bytes_total = 0, 0
    for n_steps, sign in ((10, 1), (4, -1), (5, 1), (8, -1)):
        for _ in range(n_steps):
            if sign > 0:
                rse_counter.increase(rse_id=rse_id, files=1, bytes=delta)
            else:
                rse_counter.decrease(rse_id=rse_id, files=1, bytes=delta)
            rse_update(once=True)
            files_total += sign
            bytes_total += sign * delta
            assert_counter(files_total, bytes_total)
def add_replicas(rse, files, account, rse_id=None, ignore_availability=True, session=None):
    """
    Bulk add file replicas.

    :param rse: The rse name.
    :param files: The list of files.
    :param account: The account owner.
    :param rse_id: The RSE id. To be used if rse parameter is None.
    :param ignore_availability: Ignore the RSE blacklisting.
    :param session: The database session in use.
    :returns: The list of replicas added (as returned by __bulk_add_file_dids).
    """
    # Resolve the RSE either by name or, when rse is None, by id.
    if rse:
        replica_rse = get_rse(rse=rse, session=session)
    else:
        replica_rse = get_rse(rse=None, rse_id=rse_id, session=session)
    # Always use the resolved name below: the 'rse' argument may be None
    # when the RSE was looked up by id (previously the error message and
    # get_rse_info() were handed that None).
    rse_name = replica_rse.rse

    # Availability bit 2 appears to be the 'write' flag -- TODO confirm
    # against the RSE availability encoding.
    if (not (replica_rse.availability & 2)) and not ignore_availability:
        raise exception.RessourceTemporaryUnavailable('%s is temporary unavailable for writing' % rse_name)

    replicas = __bulk_add_file_dids(files=files, account=account, session=session)

    # On non-deterministic RSEs the physical path cannot be derived from the
    # DID, so every file must carry an explicit PFN which we parse into a path.
    if not replica_rse.deterministic:
        pfns, scheme = [], None
        for file in files:
            if 'pfn' not in file:
                raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % rse_name)
            scheme = file['pfn'].split(':')[0]
            pfns.append(file['pfn'])
        p = rsemgr.create_protocol(rse_settings=rsemgr.get_rse_info(rse_name, session=session), operation='write', scheme=scheme)
        pfns = p.parse_pfns(pfns=pfns)
        for file in files:
            tmp = pfns[file['pfn']]
            file['path'] = ''.join([tmp['path'], tmp['name']])

    nbfiles, bytes = __bulk_add_replicas(rse_id=replica_rse.id, files=files, account=account, session=session)
    # Keep the per-RSE usage counter in sync with the newly added replicas.
    increase(rse_id=replica_rse.id, files=nbfiles, bytes=bytes, session=session)
    return replicas
def set_metadata_bulk(self, scope, name, meta, recursive=False, session=None):
    """
    Set several metadata keys on a data identifier in one call.

    :param scope: The scope name.
    :param name: The data identifier name.
    :param meta: Dictionary of metadata keys mapped to their new values.
    :param recursive: Option to propagate the metadata change to content.
    :param session: The database session in use.
    :raises DataIdentifierNotFound: If the DID does not exist.
    :raises InvalidValueForKey, InvalidMetadata, UnsupportedOperation: On bad values or keys.
    """
    # Base query for the target DID row; the Oracle hint pins the PK index.
    did_query = session.query(models.DataIdentifier).with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').filter_by(scope=scope, name=name)
    if did_query.one_or_none() is None:
        raise exception.DataIdentifierNotFound("Data identifier '%s:%s' not found" % (scope, name))

    # Keys without dedicated handling below are collected and applied in a
    # single bulk update at the end.
    remainder = {}
    for key, value in meta.items():
        if key == 'lifetime':
            try:
                expired_at = None
                if value is not None:
                    # lifetime is a relative offset in seconds from now.
                    expired_at = datetime.utcnow() + timedelta(seconds=float(value))
                rowcount = did_query.update({'expired_at': expired_at}, synchronize_session='fetch')
            except TypeError as error:
                raise exception.InvalidValueForKey(error)
            if not rowcount:
                # check for did presence
                raise exception.UnsupportedOperation('%s for %s:%s cannot be updated' % (key, scope, name))
        elif key in ['guid', 'events']:
            # These keys only make sense on files.
            rowcount = did_query.filter_by(did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            if not rowcount:
                # check for did presence
                raise exception.UnsupportedOperation('%s for %s:%s cannot be updated' % (key, scope, name))
            # Mirror the value on the parent/child association rows.
            session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            if key == 'events':
                # Re-aggregate the event count on every direct parent.
                for parent_scope, parent_name in session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).filter_by(child_scope=scope, child_name=name):
                    events = session.query(func.sum(models.DataIdentifierAssociation.events)).filter_by(scope=parent_scope, name=parent_name).one()[0]
                    session.query(models.DataIdentifier).filter_by(scope=parent_scope, name=parent_name).update({'events': events}, synchronize_session=False)
        elif key == 'adler32':
            # Checksum change: propagate to associations, transfer requests
            # and replicas so they stay consistent with the DID row.
            rowcount = did_query.filter_by(did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            if not rowcount:
                # check for did presence
                raise exception.UnsupportedOperation('%s for %s:%s cannot be updated' % (key, scope, name))
            session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)
            session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)
        elif key == 'bytes':
            rowcount = did_query.filter_by(did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            if not rowcount:
                # check for did presence
                raise exception.UnsupportedOperation('%s for %s:%s cannot be updated' % (key, scope, name))
            session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)
            # Update each replica lock and shift the per-account counters by
            # the difference between old size (decrease) and new (increase).
            for account, bytes, rse_id, rule_id in session.query(models.ReplicaLock.account, models.ReplicaLock.bytes, models.ReplicaLock.rse_id, models.ReplicaLock.rule_id).filter_by(scope=scope, name=name):
                session.query(models.ReplicaLock).filter_by(scope=scope, name=name, rule_id=rule_id, rse_id=rse_id).update({key: value}, synchronize_session=False)
                account_counter.decrease(rse_id=rse_id, account=account, files=1, bytes=bytes, session=session)
                account_counter.increase(rse_id=rse_id, account=account, files=1, bytes=value, session=session)
            # Same adjustment for replicas and the per-RSE counters.
            for bytes, rse_id in session.query(models.RSEFileAssociation.bytes, models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name):
                session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).update({key: value}, synchronize_session=False)
                rse_counter.decrease(rse_id=rse_id, files=1, bytes=bytes, session=session)
                rse_counter.increase(rse_id=rse_id, files=1, bytes=value, session=session)
            # Re-aggregate length/bytes/events on each parent and keep the
            # dataset locks in sync with the recomputed totals.
            for parent_scope, parent_name in session.query(models.DataIdentifierAssociation.scope,
                                                           models.DataIdentifierAssociation.name).filter_by(child_scope=scope, child_name=name):
                values = {}
                values['length'], values['bytes'], values['events'] = session.query(func.count(models.DataIdentifierAssociation.scope),
                                                                                   func.sum(models.DataIdentifierAssociation.bytes),
                                                                                   func.sum(models.DataIdentifierAssociation.events)).filter_by(scope=parent_scope, name=parent_name).one()
                session.query(models.DataIdentifier).filter_by(scope=parent_scope, name=parent_name).update(values, synchronize_session=False)
                session.query(models.DatasetLock).filter_by(scope=parent_scope, name=parent_name).update({'length': values['length'], 'bytes': values['bytes']}, synchronize_session=False)
        else:
            remainder[key] = value

    if remainder:
        try:
            rowcount = did_query.update(remainder, synchronize_session='fetch')
        except CompileError as error:
            raise exception.InvalidMetadata(error)
        except InvalidRequestError:
            raise exception.InvalidMetadata("Some of the keys are not accepted: " + str(list(remainder.keys())))
        if not rowcount:
            raise exception.UnsupportedOperation('Some of the keys for %s:%s cannot be updated: %s' % (scope, name, str(list(remainder.keys()))))

        # propagate metadata updates to child content
        if recursive:
            content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name)
            content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').filter_by(scope=scope, name=name)
            for child_scope, child_name in content_query:
                try:
                    child_did_query = session.query(models.DataIdentifier).with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').filter_by(scope=child_scope, name=child_name)
                    child_did_query.update(remainder, synchronize_session='fetch')
                except CompileError as error:
                    raise exception.InvalidMetadata(error)
                except InvalidRequestError:
                    raise exception.InvalidMetadata("Some of the keys are not accepted recursively: " + str(list(remainder.keys())))
def set_metadata(self, scope, name, key, value, recursive=False, session=None):
    """
    Add metadata to data identifier.

    :param scope: The scope name.
    :param name: The data identifier name.
    :param key: the key.
    :param value: the value.
    :param recursive: Option to propagate the metadata change to content.
    :param session: The database session in use.
    :raises DataIdentifierNotFound: If the DID does not exist.
    :raises InvalidValueForKey, InvalidMetadata, UnsupportedOperation: On bad values or keys.
    """
    try:
        # Existence check. NOTE: 'rowcount' first holds the DID row itself;
        # the special-key branches below overwrite it with real row counts,
        # so the final 'if not rowcount' check works either way.
        rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name).\
            with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').one()
    except NoResultFound:
        raise exception.DataIdentifierNotFound("Data identifier '%s:%s' not found" % (scope, name))
    if key == 'lifetime':
        try:
            expired_at = None
            if value is not None:
                # lifetime is a relative offset in seconds from now.
                expired_at = datetime.utcnow() + timedelta(seconds=float(value))
            rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name).update({'expired_at': expired_at}, synchronize_session='fetch')
        except TypeError as error:
            raise exception.InvalidValueForKey(error)
    elif key in ['guid', 'events']:
        # These keys only apply to files; mirror them on association rows.
        rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name, did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
        session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
        if key == 'events':
            # Re-aggregate the event count on every direct parent.
            for parent_scope, parent_name in session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).filter_by(child_scope=scope, child_name=name):
                events = session.query(func.sum(models.DataIdentifierAssociation.events)).filter_by(scope=parent_scope, name=parent_name).one()[0]
                session.query(models.DataIdentifier).filter_by(scope=parent_scope, name=parent_name).update({'events': events}, synchronize_session=False)
    elif key == 'adler32':
        # Checksum change: keep associations, requests and replicas in sync.
        rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name, did_type=DIDType.FILE).update({key: value},
                                                                                                                       synchronize_session=False)
        session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
        session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)
        session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)
    elif key == 'bytes':
        rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name, did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
        session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
        session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)
        # Update each replica lock and shift the per-account counters by the
        # difference between old size (decrease) and new (increase).
        for account, bytes, rse_id, rule_id in session.query(models.ReplicaLock.account,
                                                             models.ReplicaLock.bytes,
                                                             models.ReplicaLock.rse_id,
                                                             models.ReplicaLock.rule_id).filter_by(scope=scope, name=name):
            session.query(models.ReplicaLock).filter_by(scope=scope, name=name, rule_id=rule_id, rse_id=rse_id).update({key: value}, synchronize_session=False)
            account_counter.decrease(rse_id=rse_id, account=account, files=1, bytes=bytes, session=session)
            account_counter.increase(rse_id=rse_id, account=account, files=1, bytes=value, session=session)
        # Same adjustment for replicas and the per-RSE counters.
        for bytes, rse_id in session.query(models.RSEFileAssociation.bytes, models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name):
            session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).update({key: value}, synchronize_session=False)
            rse_counter.decrease(rse_id=rse_id, files=1, bytes=bytes, session=session)
            rse_counter.increase(rse_id=rse_id, files=1, bytes=value, session=session)
        # Re-aggregate length/bytes/events on each parent and keep the
        # dataset locks in sync with the recomputed totals.
        for parent_scope, parent_name in session.query(models.DataIdentifierAssociation.scope,
                                                       models.DataIdentifierAssociation.name).filter_by(child_scope=scope,
                                                                                                        child_name=name):
            values = {}
            values['length'], values['bytes'], values['events'] = session.query(func.count(models.DataIdentifierAssociation.scope),
                                                                                func.sum(models.DataIdentifierAssociation.bytes),
                                                                                func.sum(models.DataIdentifierAssociation.events)).filter_by(scope=parent_scope, name=parent_name).one()
            session.query(models.DataIdentifier).filter_by(scope=parent_scope, name=parent_name).update(values, synchronize_session=False)
            session.query(models.DatasetLock).filter_by(scope=parent_scope, name=parent_name).update({'length': values['length'], 'bytes': values['bytes']}, synchronize_session=False)
    else:
        # Generic key: let the ORM map it onto a DID column.
        try:
            rowcount = session.query(models.DataIdentifier).\
                with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
                filter_by(scope=scope, name=name).\
                update({key: value}, synchronize_session='fetch')
        except CompileError as error:
            raise exception.InvalidMetadata(error)
        except InvalidRequestError:
            raise exception.InvalidMetadata("Key %s is not accepted" % key)

        # propagate metadata updates to child content
        if recursive:
            content_query = session.query(models.DataIdentifierAssociation.child_scope,
                                          models.DataIdentifierAssociation.child_name).\
                with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\
                filter_by(scope=scope, name=name)
            for child_scope, child_name in content_query:
                try:
                    session.query(models.DataIdentifier).\
                        with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
                        filter_by(scope=child_scope, name=child_name).\
                        update({key: value}, synchronize_session='fetch')
                except CompileError as error:
                    raise exception.InvalidMetadata(error)
                except InvalidRequestError:
                    raise exception.InvalidMetadata("Key %s is not accepted" % key)
    if not rowcount:
        # check for did presence
        raise exception.UnsupportedOperation('%(key)s for %(scope)s:%(name)s cannot be updated' % locals())
elif key == 'bytes': rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name, did_type=DIDType.FILE).update({key: value}, synchronize_session=False) if not rowcount: raise exception.UnsupportedOperation('%(key)s for %(scope)s:%(name)s cannot be updated' % locals()) session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False) session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False) for account, bytes, rse_id, rule_id in session.query(models.ReplicaLock.account, models.ReplicaLock.bytes, models.ReplicaLock.rse_id).filter_by(scope=scope, name=name): session.query(models.ReplicaLock).filter_by(scope=scope, name=name, rule_id=rule_id, rse_id=rse_id).update({key: value}, synchronize_session=False) account_counter.decrease(rse_id=rse_id, account=account, files=1, bytes=bytes, session=session) account_counter.increase(rse_id=rse_id, account=account, files=1, bytes=value, session=session) for bytes, rse_id in session.query(models.RSEFileAssociation.bytes, models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name): session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).update({key: value}, synchronize_session=False) rse_counter.decrease(rse_id=rse_id, files=1, bytes=bytes, session=session) rse_counter.increase(rse_id=rse_id, files=1, bytes=value, session=session) for parent_scope, parent_name in session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).filter_by(child_scope=scope, child_name=name): values = {} values['length'], values['bytes'], values['events'] = session.query(func.count(models.DataIdentifierAssociation.scope), func.sum(models.DataIdentifierAssociation.bytes), func.sum(models.DataIdentifierAssociation.events)).filter_by(scope=parent_scope, name=parent_name).one() 
session.query(models.DataIdentifier).filter_by(scope=parent_scope, name=parent_name).update(values, synchronize_session=False) session.query(models.DatasetLock).filter_by(scope=parent_scope, name=parent_name).update({'length': values['length'], 'bytes': values['bytes']}, synchronize_session=False) else: try: session.query(models.DataIdentifier).filter_by(scope=scope, name=name).update({key: value}, synchronize_session='fetch') # add DIDtype except CompileError, e: raise exception.InvalidMetadata(e)