Example #1
def delete_volatile_replicas(rse, replicas, session=None):
    """
    Bulk delete volatile replicas.

    :param rse: the rse name.
    :param replicas: the list of volatile replicas.
    :param session: The database session in use.
    :returns: True if successful.
    """
    # first check that the rse is a volatile one
    try:
        rse_id = session.query(models.RSE.id).filter_by(rse=rse, volatile=True).one()[0]
    except NoResultFound:
        raise exception.UnsupportedOperation('No volatile rse found for %(rse)s !' % locals())

    conditions = []
    for replica in replicas:
        conditions.append(and_(models.RSEFileAssociation.scope == replica['scope'],
                               models.RSEFileAssociation.name == replica['name']))

    if conditions:
        session.query(models.RSEFileAssociation).\
            filter(models.RSEFileAssociation.rse_id == rse_id).\
            filter(or_(*conditions)).\
            delete(synchronize_session=False)
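A minimal usage sketch for the function above; the session is assumed to be an open SQLAlchemy session supplied by the caller, and the RSE name and DIDs are illustrative (only the 'scope' and 'name' keys of each replica dictionary are read):

def demo_delete_volatile_replicas(session):
    # Hypothetical call: drop two cached replicas from a volatile RSE.
    replicas = [{'scope': 'user.jdoe', 'name': 'file_1.root'},
                {'scope': 'user.jdoe', 'name': 'file_2.root'}]
    delete_volatile_replicas(rse='VOLATILE_CACHE', replicas=replicas, session=session)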
Example #2
def list_vos(session=None):
    """
    List all the VOs in the db.

    :param session: The db session in use.
    :returns: List of VO dictionaries.
    """
    if not config_get_bool(
            'common', 'multi_vo', raise_exception=False, default=False):
        raise exception.UnsupportedOperation(
            'VO operations cannot be performed in single VO mode.')

    query = session.query(models.VO)

    vos = []
    for vo in query.all():
        vo_dict = {
            'vo': vo.vo,
            'description': vo.description,
            'email': vo.email,
            'created_at': vo.created_at,
            'updated_at': vo.updated_at
        }
        vos.append(vo_dict)

    return vos
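A minimal usage sketch; the session is assumed to be supplied by the caller, and the printed keys match the dictionaries built by list_vos above:

def demo_list_vos(session):
    # Print each VO's tag, description and contact email.
    for vo in list_vos(session=session):
        print(vo['vo'], vo['description'], vo['email'])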
Example #3
    def list_dids(self,
                  scope,
                  filters,
                  did_type='collection',
                  ignore_case=False,
                  limit=None,
                  offset=None,
                  long=False,
                  recursive=False,
                  ignore_dids=None,
                  session=None):
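        """
        Search data identifiers.

        :param scope: the scope name.
        :param filters: dictionary of attributes by which the results should be filtered.
        :param did_type: the type of the did: all(container, dataset, file), collection(dataset or container), dataset, container, file.
        :param ignore_case: ignore case distinctions.
        :param limit: limit number.
        :param offset: offset number.
        :param long: Long format option to display more information for each DID.
        :param recursive: Recursively list DIDs content (not supported by this plugin).
        :param ignore_dids: List of DIDs to refrain from yielding.
        :param session: The database session in use.
        """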
        if not ignore_dids:
            ignore_dids = set()

        # backwards compatibility for filters passed as a single {}.
        if isinstance(filters, dict):
            filters = [filters]

        # instantiate fe and create mongo query
        fe = FilterEngine(filters, model_class=None, strict_coerce=False)
        mongo_query_str = fe.create_mongo_query(
            additional_filters=[('scope', operator.eq,
                                 scope.internal), ('vo', operator.eq,
                                                   scope.vo)])

        if recursive:
            # TODO: possible, but requires retrieving the results of a concurrent sqla query to call list_content on for datasets and containers
            raise exception.UnsupportedOperation(
                "'{}' metadata module does not currently support recursive searches"
                .format(self.plugin_name.lower()))

        if long:
            query_result = self.col.find(mongo_query_str)
            if limit:
                query_result = query_result.limit(limit)
            for did in query_result:
                did_full = "{}:{}".format(did['scope'], did['name'])
                if did_full not in ignore_dids:  # aggregating recursive queries may contain duplicate DIDs
                    ignore_dids.add(did_full)
                    yield {
                        'scope': InternalScope(did['scope']),
                        'name': did['name'],
                        'did_type': "N/A",
                        'bytes': "N/A",
                        'length': "N/A"
                    }
        else:
            query_result = self.col.find(mongo_query_str)
            if limit:
                query_result = query_result.limit(limit)
            for did in query_result:
                did_full = "{}:{}".format(did['scope'], did['name'])
                if did_full not in ignore_dids:  # aggregating recursive queries may contain duplicate DIDs
                    ignore_dids.add(did_full)
                    yield did['name']
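A hedged usage sketch for the plugin method above; the plugin instance and the scope argument are assumptions (the scope object must expose .internal and .vo, as used in the method):

def demo_mongo_list_dids(plugin, scope):
    # Hypothetical call: list up to 10 DID names matching an illustrative metadata filter.
    filters = [{'campaign': 'run2'}]
    for name in plugin.list_dids(scope=scope, filters=filters, long=False, limit=10):
        print(name)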
Example #4
def add_vo(vo, description, email, session=None):
    """
    Add a VO and set up a new root user.
    The new root user will have the account name 'root' and a userpass identity with username '******' and password '******'.

    :param vo: 3-letter unique tag for a VO.
    :param description: Descriptive string for the VO (e.g. full name).
    :param email: Contact email for the VO.
    :param session: The db session in use.
    """
    if not config_get_bool(
            'common', 'multi_vo', raise_exception=False, default=False):
        raise exception.UnsupportedOperation(
            'VO operations cannot be performed in single VO mode.')

    if len(vo) != 3:
        raise exception.RucioException('Invalid VO tag, must be 3 chars.')

    new_vo = models.VO(vo=vo, description=description, email=email)

    try:
        new_vo.save(session=session)
    except IntegrityError:
        raise exception.Duplicate('VO {} already exists!'.format(vo))
    except DatabaseError as error:
        raise exception.RucioException(error.args)

    from rucio.core.account import add_account, list_identities
    from rucio.core.identity import add_account_identity
    new_root = InternalAccount('root', vo=vo)
    add_account(account=new_root,
                type_=AccountType['SERVICE'],
                email=email,
                session=session)
    add_account_identity(identity='root@{}'.format(vo),
                         type_=IdentityType['USERPASS'],
                         account=new_root,
                         email=email,
                         default=False,
                         password='******',
                         session=session)

    for ident in list_identities(account=InternalAccount('super_root',
                                                         vo='def'),
                                 session=session):
        add_account_identity(identity=ident['identity'],
                             type_=ident['type'],
                             account=new_root,
                             email='',
                             session=session)
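A minimal usage sketch for add_vo; the VO tag, description and email are illustrative, and the session is assumed to be supplied by the caller:

def demo_add_vo(session):
    # Hypothetical call: register a new three-letter VO and its root account.
    add_vo(vo='xyz', description='Example VO', email='admin@example.org', session=session)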
Example #5
def add_volatile_replicas(rse, replicas, session=None):
    """
    Bulk add volatile replicas.

    :param rse: the rse name.
    :param replicas: the list of volatile replicas.
    :param session: The database session in use.
    :returns: True if successful.
    """
    # first check that the rse is a volatile one
    try:
        rse_id = session.query(models.RSE.id).filter_by(rse=rse, volatile=True).one()[0]
    except NoResultFound:
        raise exception.UnsupportedOperation('No volatile rse found for %(rse)s !' % locals())

    file_clause, replica_clause = [], []
    for replica in replicas:
        file_clause.append(and_(models.DataIdentifier.scope == replica['scope'],
                                models.DataIdentifier.name == replica['name'],
                                ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where(and_(models.RSEFileAssociation.scope == replica['scope'],
                                                                                                                                    models.RSEFileAssociation.name == replica['name'],
                                                                                                                                    models.RSEFileAssociation.rse_id == rse_id))))
        replica_clause.append(and_(models.RSEFileAssociation.scope == replica['scope'],
                                   models.RSEFileAssociation.name == replica['name'],
                                   models.RSEFileAssociation.rse_id == rse_id))

    if replica_clause:
        now = datetime.utcnow()
        session.query(models.RSEFileAssociation).\
            with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\
            filter(or_(*replica_clause)).\
            update({'updated_at': now, 'tombstone': now}, synchronize_session=False)

    if file_clause:
        file_query = session.query(models.DataIdentifier.scope,
                                   models.DataIdentifier.name,
                                   models.DataIdentifier.bytes,
                                   models.DataIdentifier.md5,
                                   models.DataIdentifier.adler32).\
            filter(or_(*file_clause))

        session.bulk_insert_mappings(
            models.RSEFileAssociation,
            [{'rse_id': rse_id, 'adler32': adler32, 'state': ReplicaState.AVAILABLE,
              'scope': scope, 'name': name, 'lock_cnt': 0, 'tombstone': datetime.utcnow(),
              'bytes': bytes, 'md5': md5} for scope, name, bytes, md5, adler32 in file_query])
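A minimal usage sketch for the function above; as with deletion, only the 'scope' and 'name' keys of each replica dictionary are read, and the RSE name, DID and session are assumptions for illustration:

def demo_add_volatile_replicas(session):
    # Hypothetical call: register one cached replica on a volatile RSE.
    replicas = [{'scope': 'user.jdoe', 'name': 'file_1.root'}]
    add_volatile_replicas(rse='VOLATILE_CACHE', replicas=replicas, session=session)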
Example #6
def update_vo(vo, parameters, session=None):
    """
    Update VO properties (email, description).

    :param vo: The VO to update.
    :param parameters: A dictionary with the new properties.
    :param session: The db session in use.
    """
    if not config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        raise exception.UnsupportedOperation('VO operations cannot be performed in single VO mode.')

    try:
        query = session.query(models.VO).filter_by(vo=vo).one()
    except NoResultFound:
        raise exception.VONotFound('VO {} not found'.format(vo))
    param = {}
    for key in parameters:
        if key in ['email', 'description']:
            param[key] = parameters[key]
    query.update(param)
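A minimal usage sketch; note that update_vo above only honours the 'email' and 'description' keys, and any other keys in the parameters dictionary are silently ignored:

def demo_update_vo(session):
    # Hypothetical call: update the contact details of an existing VO.
    update_vo(vo='xyz', parameters={'email': 'ops@example.org', 'description': 'Updated VO'}, session=session)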
Example #7
    def list_dids(self, scope, filters, type='collection', ignore_case=False, limit=None,
                  offset=None, long=False, recursive=False, session=None):
        """
        Search data identifiers.

        :param scope: the scope name.
        :param filters: dictionary of attributes by which the results should be filtered.
        :param type: the type of the did: all(container, dataset, file), collection(dataset or container), dataset, container, file.
        :param ignore_case: ignore case distinctions.
        :param limit: limit number.
        :param offset: offset number.
        :param long: Long format option to display more information for each DID.
        :param session: The database session in use.
        :param recursive: Recursively list DIDs content.
        """
        types = ['all', 'collection', 'container', 'dataset', 'file']
        if type not in types:
            raise exception.UnsupportedOperation("Valid type are: %(types)s" % locals())

        query = session.query(models.DataIdentifier.scope,
                              models.DataIdentifier.name,
                              models.DataIdentifier.did_type,
                              models.DataIdentifier.bytes,
                              models.DataIdentifier.length).\
            filter(models.DataIdentifier.scope == scope)

        # Exclude suppressed dids
        query = query.filter(models.DataIdentifier.suppressed != true())

        if type == 'all':
            query = query.filter(or_(models.DataIdentifier.did_type == DIDType.CONTAINER,
                                     models.DataIdentifier.did_type == DIDType.DATASET,
                                     models.DataIdentifier.did_type == DIDType.FILE))
        elif type.lower() == 'collection':
            query = query.filter(or_(models.DataIdentifier.did_type == DIDType.CONTAINER,
                                     models.DataIdentifier.did_type == DIDType.DATASET))
        elif type.lower() == 'container':
            query = query.filter(models.DataIdentifier.did_type == DIDType.CONTAINER)
        elif type.lower() == 'dataset':
            query = query.filter(models.DataIdentifier.did_type == DIDType.DATASET)
        elif type.lower() == 'file':
            query = query.filter(models.DataIdentifier.did_type == DIDType.FILE)

        for (k, v) in filters.items():

            if k not in ['created_before', 'created_after', 'length.gt', 'length.lt', 'length.lte', 'length.gte', 'length'] \
                    and not hasattr(models.DataIdentifier, k):
                raise exception.KeyNotFound(k)

            if isinstance(v, string_types) and ('*' in v or '%' in v):
                if v in ('*', '%', u'*', u'%'):
                    continue
                query = query.filter(getattr(models.DataIdentifier, k).
                                     like(v.replace('*', '%').replace('_', '\_'),  # NOQA: W605
                                          escape='\\'))
            elif k == 'created_before':
                created_before = str_to_date(v)
                query = query.filter(models.DataIdentifier.created_at <= created_before)
            elif k == 'created_after':
                created_after = str_to_date(v)
                query = query.filter(models.DataIdentifier.created_at >= created_after)
            elif k == 'guid':
                query = query.filter_by(guid=v).\
                    with_hint(models.ReplicaLock, "INDEX(DIDS_GUIDS_IDX)", 'oracle')
            elif k == 'length.gt':
                query = query.filter(models.DataIdentifier.length > v)
            elif k == 'length.lt':
                query = query.filter(models.DataIdentifier.length < v)
            elif k == 'length.gte':
                query = query.filter(models.DataIdentifier.length >= v)
            elif k == 'length.lte':
                query = query.filter(models.DataIdentifier.length <= v)
            elif k == 'length':
                query = query.filter(models.DataIdentifier.length == v)
            else:
                query = query.filter(getattr(models.DataIdentifier, k) == v)

        if 'name' in filters:
            if '*' in filters['name']:
                query = query.\
                    with_hint(models.DataIdentifier, "NO_INDEX(dids(SCOPE,NAME))", 'oracle')
            else:
                query = query.\
                    with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')

        if limit:
            query = query.limit(limit)

        if recursive:
            # Get attached DIDs and save them in a list because the query has to be finished before starting a new one in the recursion
            collections_content = []
            parent_scope = scope

            from rucio.core.did import list_content

            for scope, name, did_type, bytes, length in query.yield_per(100):
                if (did_type == DIDType.CONTAINER or did_type == DIDType.DATASET):
                    collections_content += [did for did in list_content(scope=scope, name=name)]

            # List DIDs again to use filter
            for did in collections_content:
                filters['name'] = did['name']
                for result in self.list_dids(scope=did['scope'], filters=filters, recursive=True, type=type, limit=limit, offset=offset, long=long, session=session):
                    yield result

        if long:
            for scope, name, did_type, bytes, length in query.yield_per(5):
                yield {'scope': scope,
                       'name': name,
                       'did_type': str(did_type),
                       'bytes': bytes,
                       'length': length}
        else:
            for scope, name, did_type, bytes, length in query.yield_per(5):
                yield name
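A hedged usage sketch for the legacy method above; the did_handler instance, scope and session are assumptions. Filters are passed as a flat dictionary, with '*' wildcards and suffixed range keys such as 'length.gt' handled as in the loop above:

def demo_list_dids_legacy(did_handler, scope, session):
    # Hypothetical call: list dataset names matching a wildcard with at least one file.
    filters = {'name': 'data17_*', 'length.gt': 0}
    for name in did_handler.list_dids(scope=scope, filters=filters, type='dataset', session=session):
        print(name)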
Example #8
    def set_metadata_bulk(self, scope, name, meta, recursive=False, session=None):
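        """
        Add metadata to a data identifier in bulk.

        :param scope: The scope name.
        :param name: The data identifier name.
        :param meta: Dictionary of metadata key/value pairs to set.
        :param recursive: Option to propagate the metadata change to content.
        :param session: The database session in use.
        """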
        did_query = session.query(models.DataIdentifier).with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').filter_by(scope=scope, name=name)
        if did_query.one_or_none() is None:
            raise exception.DataIdentifierNotFound("Data identifier '%s:%s' not found" % (scope, name))

        remainder = {}
        for key, value in meta.items():
            if key == 'lifetime':
                try:
                    expired_at = None
                    if value is not None:
                        expired_at = datetime.utcnow() + timedelta(seconds=float(value))
                    rowcount = did_query.update({'expired_at': expired_at}, synchronize_session='fetch')
                except TypeError as error:
                    raise exception.InvalidValueForKey(error)
                if not rowcount:
                    # check for did presence
                    raise exception.UnsupportedOperation('%s for %s:%s cannot be updated' % (key, scope, name))
            elif key in ['guid', 'events']:
                rowcount = did_query.filter_by(did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
                if not rowcount:
                    # check for did presence
                    raise exception.UnsupportedOperation('%s for %s:%s cannot be updated' % (key, scope, name))

                session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
                if key == 'events':
                    for parent_scope, parent_name in session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).filter_by(child_scope=scope, child_name=name):
                        events = session.query(func.sum(models.DataIdentifierAssociation.events)).filter_by(scope=parent_scope, name=parent_name).one()[0]
                        session.query(models.DataIdentifier).filter_by(scope=parent_scope, name=parent_name).update({'events': events}, synchronize_session=False)

            elif key == 'adler32':
                rowcount = did_query.filter_by(did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
                if not rowcount:
                    # check for did presence
                    raise exception.UnsupportedOperation('%s for %s:%s cannot be updated' % (key, scope, name))

                session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
                session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)
                session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)
            elif key == 'bytes':
                rowcount = did_query.filter_by(did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
                if not rowcount:
                    # check for did presence
                    raise exception.UnsupportedOperation('%s for %s:%s cannot be updated' % (key, scope, name))

                session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
                session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)

                for account, bytes, rse_id, rule_id in session.query(models.ReplicaLock.account, models.ReplicaLock.bytes, models.ReplicaLock.rse_id, models.ReplicaLock.rule_id).filter_by(scope=scope, name=name):
                    session.query(models.ReplicaLock).filter_by(scope=scope, name=name, rule_id=rule_id, rse_id=rse_id).update({key: value}, synchronize_session=False)
                    account_counter.decrease(rse_id=rse_id, account=account, files=1, bytes=bytes, session=session)
                    account_counter.increase(rse_id=rse_id, account=account, files=1, bytes=value, session=session)

                for bytes, rse_id in session.query(models.RSEFileAssociation.bytes, models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name):
                    session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).update({key: value}, synchronize_session=False)
                    rse_counter.decrease(rse_id=rse_id, files=1, bytes=bytes, session=session)
                    rse_counter.increase(rse_id=rse_id, files=1, bytes=value, session=session)

                for parent_scope, parent_name in session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).filter_by(child_scope=scope, child_name=name):
                    values = {}
                    values['length'], values['bytes'], values['events'] = session.query(func.count(models.DataIdentifierAssociation.scope),
                                                                                        func.sum(models.DataIdentifierAssociation.bytes),
                                                                                        func.sum(models.DataIdentifierAssociation.events)).filter_by(scope=parent_scope, name=parent_name).one()
                    session.query(models.DataIdentifier).filter_by(scope=parent_scope, name=parent_name).update(values, synchronize_session=False)
                    session.query(models.DatasetLock).filter_by(scope=parent_scope, name=parent_name).update({'length': values['length'], 'bytes': values['bytes']}, synchronize_session=False)
            else:
                remainder[key] = value

        if remainder:
            try:
                rowcount = did_query.update(remainder, synchronize_session='fetch')
            except CompileError as error:
                raise exception.InvalidMetadata(error)
            except InvalidRequestError:
                raise exception.InvalidMetadata("Some of the keys are not accepted: " + str(list(remainder.keys())))
            if not rowcount:
                raise exception.UnsupportedOperation('Some of the keys for %s:%s cannot be updated: %s' % (scope, name, str(list(remainder.keys()))))

            # propagate metadata updates to child content
            if recursive:
                content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name)
                content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').filter_by(scope=scope, name=name)

                for child_scope, child_name in content_query:
                    try:
                        child_did_query = session.query(models.DataIdentifier).with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').filter_by(scope=child_scope, name=child_name)
                        child_did_query.update(remainder, synchronize_session='fetch')
                    except CompileError as error:
                        raise exception.InvalidMetadata(error)
                    except InvalidRequestError:
                        raise exception.InvalidMetadata("Some of the keys are not accepted recursively: " + str(list(remainder.keys())))
Example #9
    def list_dids(self, scope, filters, did_type='collection', ignore_case=False, limit=None,
                  offset=None, long=False, recursive=False, ignore_dids=None, session=None):
        """
        Search data identifiers.

        :param scope: the scope name.
        :param filters: dictionary of attributes by which the results should be filtered.
        :param did_type: the type of the did: all(container, dataset, file), collection(dataset or container), dataset, container, file.
        :param ignore_case: ignore case distinctions.
        :param limit: limit number.
        :param offset: offset number.
        :param long: Long format option to display more information for each DID.
        :param session: The database session in use.
        :param recursive: Recursively list DIDs content.
        :param ignore_dids: List of DIDs to refrain from yielding.
        """
        if not ignore_dids:
            ignore_dids = set()

        # mapping for semantic <type> to a (set of) recognised DIDType(s).
        type_to_did_type_mapping = {
            'all': [DIDType.CONTAINER, DIDType.DATASET, DIDType.FILE],
            'collection': [DIDType.CONTAINER, DIDType.DATASET],
            'container': [DIDType.CONTAINER],
            'dataset': [DIDType.DATASET],
            'file': [DIDType.FILE]
        }

        # backwards compatibility for filters passed as a single {}.
        if isinstance(filters, dict):
            filters = [filters]

        # for each or_group, make sure there is a mapped "did_type" filter.
        # if type maps to many DIDTypes, the corresponding or_group will be copied the required number of times to satisfy all the logical possibilities.
        filters_tmp = []
        for or_group in filters:
            if 'type' not in or_group:
                or_group_type = did_type.lower()
            else:
                or_group_type = or_group.pop('type').lower()
            if or_group_type not in type_to_did_type_mapping.keys():
                raise exception.UnsupportedOperation('{} is not a valid type. Valid types are {}'.format(or_group_type, type_to_did_type_mapping.keys()))

            for mapped_did_type in type_to_did_type_mapping[or_group_type]:
                or_group['did_type'] = mapped_did_type
                filters_tmp.append(or_group.copy())
        filters = filters_tmp

        # instantiate fe and create sqla query
        fe = FilterEngine(filters, model_class=models.DataIdentifier)
        query = fe.create_sqla_query(
            additional_model_attributes=[
                models.DataIdentifier.scope,
                models.DataIdentifier.name,
                models.DataIdentifier.did_type,
                models.DataIdentifier.bytes,
                models.DataIdentifier.length
            ], additional_filters=[
                (models.DataIdentifier.scope, operator.eq, scope),
                (models.DataIdentifier.suppressed, operator.ne, true())
            ]
        )

        if limit:
            query = query.limit(limit)
        if recursive:
            from rucio.core.did import list_content

            # Get attached DIDs and save them in a list because the query has to be finished before starting a new one in the recursion
            collections_content = []
            for did in query.yield_per(100):
                if (did.did_type == DIDType.CONTAINER or did.did_type == DIDType.DATASET):
                    collections_content += [d for d in list_content(scope=did.scope, name=did.name)]

            # Replace any name filtering with recursed DID names.
            for did in collections_content:
                for or_group in filters:
                    or_group['name'] = did['name']
                for result in self.list_dids(scope=did['scope'], filters=filters, recursive=True, did_type=did_type, limit=limit, offset=offset,
                                             long=long, ignore_dids=ignore_dids, session=session):
                    yield result

        for did in query.yield_per(5):                  # don't unpack this as it makes it dependent on query return order!
            if long:
                did_full = "{}:{}".format(did.scope, did.name)
                if did_full not in ignore_dids:         # concatenating results of OR clauses may contain duplicate DIDs if query result sets not mutually exclusive.
                    ignore_dids.add(did_full)
                    yield {
                        'scope': did.scope,
                        'name': did.name,
                        'did_type': str(did.did_type),
                        'bytes': did.bytes,
                        'length': did.length
                    }
            else:
                did_full = "{}:{}".format(did.scope, did.name)
                if did_full not in ignore_dids:         # concatenating results of OR clauses may contain duplicate DIDs if query result sets not mutually exclusive.
                    ignore_dids.add(did_full)
                    yield did.name
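A hedged usage sketch for the filter-engine based method above; the did_plugin instance, scope and session are assumptions. Filters are a list of OR-groups, and a per-group 'type' key overrides the did_type argument, as handled in the mapping loop above:

def demo_list_dids_filter_engine(did_plugin, scope, session):
    # Hypothetical call: search datasets and containers with two OR'ed name patterns.
    filters = [{'name': 'data17_*', 'type': 'dataset'}, {'name': 'mc16_*'}]
    for did in did_plugin.list_dids(scope=scope, filters=filters, did_type='collection', long=True, session=session):
        print(did['scope'], did['name'], did['did_type'])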
Example #10
    def set_metadata(self, scope, name, key, value, recursive=False, session=None):
        """
        Add metadata to data identifier.

        :param scope: The scope name.
        :param name: The data identifier name.
        :param key: the key.
        :param value: the value.
        :param recursive: Option to propagate the metadata change to content.
        :param session: The database session in use.
        """
        try:
            rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name).\
                with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').one()
        except NoResultFound:
            raise exception.DataIdentifierNotFound("Data identifier '%s:%s' not found" % (scope, name))

        if key == 'lifetime':
            try:
                expired_at = None
                if value is not None:
                    expired_at = datetime.utcnow() + timedelta(seconds=float(value))
                rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name).update({'expired_at': expired_at}, synchronize_session='fetch')
            except TypeError as error:
                raise exception.InvalidValueForKey(error)
        elif key in ['guid', 'events']:
            rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name, did_type=DIDType.FILE).update({key: value}, synchronize_session=False)

            session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            if key == 'events':
                for parent_scope, parent_name in session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).filter_by(child_scope=scope, child_name=name):
                    events = session.query(func.sum(models.DataIdentifierAssociation.events)).filter_by(scope=parent_scope, name=parent_name).one()[0]
                    session.query(models.DataIdentifier).filter_by(scope=parent_scope, name=parent_name).update({'events': events}, synchronize_session=False)

        elif key == 'adler32':
            rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name, did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)
            session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)
        elif key == 'bytes':
            rowcount = session.query(models.DataIdentifier).filter_by(scope=scope, name=name, did_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            session.query(models.DataIdentifierAssociation).filter_by(child_scope=scope, child_name=name, child_type=DIDType.FILE).update({key: value}, synchronize_session=False)
            session.query(models.Request).filter_by(scope=scope, name=name).update({key: value}, synchronize_session=False)

            for account, bytes, rse_id, rule_id in session.query(models.ReplicaLock.account, models.ReplicaLock.bytes, models.ReplicaLock.rse_id, models.ReplicaLock.rule_id).filter_by(scope=scope, name=name):
                session.query(models.ReplicaLock).filter_by(scope=scope, name=name, rule_id=rule_id, rse_id=rse_id).update({key: value}, synchronize_session=False)
                account_counter.decrease(rse_id=rse_id, account=account, files=1, bytes=bytes, session=session)
                account_counter.increase(rse_id=rse_id, account=account, files=1, bytes=value, session=session)

            for bytes, rse_id in session.query(models.RSEFileAssociation.bytes, models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name):
                session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).update({key: value}, synchronize_session=False)
                rse_counter.decrease(rse_id=rse_id, files=1, bytes=bytes, session=session)
                rse_counter.increase(rse_id=rse_id, files=1, bytes=value, session=session)

            for parent_scope, parent_name in session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).filter_by(child_scope=scope, child_name=name):

                values = {}
                values['length'], values['bytes'], values['events'] = session.query(func.count(models.DataIdentifierAssociation.scope),
                                                                                    func.sum(models.DataIdentifierAssociation.bytes),
                                                                                    func.sum(models.DataIdentifierAssociation.events)).filter_by(scope=parent_scope, name=parent_name).one()
                session.query(models.DataIdentifier).filter_by(scope=parent_scope, name=parent_name).update(values, synchronize_session=False)
                session.query(models.DatasetLock).filter_by(scope=parent_scope, name=parent_name).update({'length': values['length'], 'bytes': values['bytes']}, synchronize_session=False)
        else:
            try:
                rowcount = session.query(models.DataIdentifier).\
                    with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
                    filter_by(scope=scope, name=name).\
                    update({key: value}, synchronize_session='fetch')
            except CompileError as error:
                raise exception.InvalidMetadata(error)
            except InvalidRequestError:
                raise exception.InvalidMetadata("Key %s is not accepted" % key)

            # propagate metadata updates to child content
            if recursive:
                content_query = session.query(models.DataIdentifierAssociation.child_scope,
                                              models.DataIdentifierAssociation.child_name).\
                    with_hint(models.DataIdentifierAssociation,
                              "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\
                    filter_by(scope=scope, name=name)

                for child_scope, child_name in content_query:
                    try:
                        session.query(models.DataIdentifier).\
                            with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
                            filter_by(scope=child_scope, name=child_name).\
                            update({key: value}, synchronize_session='fetch')
                    except CompileError as error:
                        raise exception.InvalidMetadata(error)
                    except InvalidRequestError:
                        raise exception.InvalidMetadata("Key %s is not accepted" % key)

        if not rowcount:
            # check for did presence
            raise exception.UnsupportedOperation('%(key)s for %(scope)s:%(name)s cannot be updated' % locals())
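A hedged usage sketch for set_metadata above; the did_plugin instance, scope, name and session are assumptions. Setting 'lifetime' stores an absolute expired_at timestamp computed from the given number of seconds, as in the first branch of the method:

def demo_set_metadata(did_plugin, scope, name, session):
    # Hypothetical call: let the DID expire one day from now.
    did_plugin.set_metadata(scope=scope, name=name, key='lifetime', value=86400, session=session)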