Example 1
def __release_all_activities(stats, direction, rse_name, rse_id, logger, session):
    """
    Release requests if activities should be ignored.

    :param stats:          Request statistics
    :param direction:      String whether request statistics are based on source or destination RSEs.
    :param rse_name:       RSE name.
    :param rse_id:         RSE id.
    """
    threshold = stats['threshold']
    transfer = stats['transfer']
    waiting = stats['waiting']
    strategy = stats['strategy']
    if threshold is not None and transfer + waiting > threshold:
        record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.{activity}.{rse}.{limit_attr}', threshold, labels={'activity': 'all_activities', 'rse': rse_name, 'limit_attr': 'max_transfers'})
        record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.{activity}.{rse}.{limit_attr}', transfer, labels={'activity': 'all_activities', 'rse': rse_name, 'limit_attr': 'transfers'})
        record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.{activity}.{rse}.{limit_attr}', waiting, labels={'activity': 'all_activities', 'rse': rse_name, 'limit_attr': 'waiting'})
        if transfer < 0.8 * threshold:
            to_be_released = threshold - transfer
            if strategy == 'grouped_fifo':
                deadline = stats.get('deadline')
                volume = stats.get('volume')
                release_waiting_requests_grouped_fifo(rse_id, count=to_be_released, direction=direction, volume=volume, deadline=deadline, session=session)
            elif strategy == 'fifo':
                release_waiting_requests_fifo(rse_id, count=to_be_released, direction=direction, session=session)
        else:
            logger(logging.DEBUG, "Throttler has done nothing on rse %s (transfer > 0.8 * threshold)" % rse_name)
    elif waiting > 0 or not threshold:
        logger(logging.DEBUG, "Throttler remove limits(threshold: %s) and release all waiting requests, rse %s" % (threshold, rse_name))
        delete_rse_transfer_limits(rse_id, activity='all_activities', session=session)
        release_all_waiting_requests(rse_id, direction=direction, session=session)
        record_counter('daemons.conveyor.throttler.delete_rse_transfer_limits.{activity}.{rse}', labels={'activity': 'all_activities', 'rse': rse_name})
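
A note on the branching above, for readers without a Rucio deployment at hand: the throttler only acts when an 'all_activities' threshold exists and the active plus waiting requests exceed it, and it only releases the free slots (threshold - transfer) while active transfers stay below 80% of the threshold; otherwise it drops the limit and releases everything. The standalone sketch below mirrors that decision; ReleaseDecision and decide_release are illustrative names, not Rucio API.

from dataclasses import dataclass
from typing import Optional


@dataclass
class ReleaseDecision:
    release_count: int   # how many waiting requests to release
    drop_limit: bool     # whether the 'all_activities' limit should be removed
    release_all: bool    # whether every waiting request should be released


def decide_release(threshold: Optional[int], transfer: int, waiting: int) -> ReleaseDecision:
    """Mirror the branching of __release_all_activities for a single RSE."""
    if threshold is not None and transfer + waiting > threshold:
        if transfer < 0.8 * threshold:
            # Fill the free slots up to the configured threshold.
            return ReleaseDecision(threshold - transfer, drop_limit=False, release_all=False)
        # Enough transfers are already active; do nothing this cycle.
        return ReleaseDecision(0, drop_limit=False, release_all=False)
    if waiting > 0 or not threshold:
        # No effective limit: remove it and release every waiting request.
        return ReleaseDecision(waiting, drop_limit=True, release_all=True)
    return ReleaseDecision(0, drop_limit=False, release_all=False)


# Example: threshold 100, 60 active, 70 waiting -> release 40 requests.
print(decide_release(100, 60, 70))
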
Example 2
def __schedule_requests():
    """
    Schedule requests
    """
    try:
        logging.info("Throttler retrieve requests statistics")
        results = get_stats_by_activity_dest_state(state=[
            RequestState.QUEUED, RequestState.SUBMITTING,
            RequestState.SUBMITTED, RequestState.WAITING
        ])
        result_dict = {}
        for activity, dest_rse_id, account, state, rse, counter in results:
            threshold = get_config_limit(activity, dest_rse_id)
            if threshold or (counter and (state == RequestState.WAITING)):
                if dest_rse_id not in result_dict:
                    result_dict[dest_rse_id] = {
                        'waiting': 0,
                        'transfer': 0,
                        'threshold': get_config_limit('all_activities', dest_rse_id),
                        'rse': rse,
                        'activities': {}
                    }

                if activity not in result_dict[dest_rse_id]['activities']:
                    result_dict[dest_rse_id]['activities'][activity] = {
                        'waiting': 0,
                        'transfer': 0,
                        'threshold': threshold,
                        'accounts': {}
                    }
                if account not in result_dict[dest_rse_id]['activities'][activity]['accounts']:
                    result_dict[dest_rse_id]['activities'][activity]['accounts'][account] = {
                        'waiting': 0,
                        'transfer': 0
                    }
                if state == RequestState.WAITING:
                    result_dict[dest_rse_id]['activities'][activity]['accounts'][account]['waiting'] += counter
                    result_dict[dest_rse_id]['activities'][activity]['waiting'] += counter
                    result_dict[dest_rse_id]['waiting'] += counter
                else:
                    result_dict[dest_rse_id]['activities'][activity]['accounts'][account]['transfer'] += counter
                    result_dict[dest_rse_id]['activities'][activity]['transfer'] += counter
                    result_dict[dest_rse_id]['transfer'] += counter

        for dest_rse_id in result_dict:
            dest_rse_release_strategy = config_core.get(
                'throttler_release_strategy',
                'dest_%s' % dest_rse_id,
                default='fifo',
                use_cache=False)
            rse_name = result_dict[dest_rse_id]['rse']
            availability = get_rse(dest_rse_id).availability
            if availability & 2:  # dest_rse is not blacklisted for write
                if dest_rse_release_strategy == 'grouped_fifo':
                    threshold = result_dict[dest_rse_id]['threshold']
                    transfer = result_dict[dest_rse_id]['transfer']
                    waiting = result_dict[dest_rse_id]['waiting']
                    if threshold and transfer + waiting > threshold:
                        record_gauge(
                            'daemons.conveyor.throttler.set_rse_transfer_limits.%s.max_transfers'
                            % (rse_name), threshold)
                        record_gauge(
                            'daemons.conveyor.throttler.set_rse_transfer_limits.%s.transfers'
                            % (rse_name), transfer)
                        record_gauge(
                            'daemons.conveyor.throttler.set_rse_transfer_limits.%s.waitings'
                            % (rse_name), waiting)
                        if transfer < 0.8 * threshold:
                            to_be_released = threshold - transfer
                            release_waiting_requests_grouped_fifo(
                                rse_id=dest_rse_id, count=to_be_released)
                        else:
                            logging.debug(
                                "Throttler has done nothing on rse %s (transfer > 0.8 * threshold)"
                                % rse_name)
                    elif waiting > 0 or not threshold:
                        logging.debug(
                            "Throttler remove limits(threshold: %s) and release all waiting requests, rse %s"
                            % (threshold, rse_name))
                        # the grouped_fifo branch operates on the 'all_activities' limit,
                        # not on the leftover per-activity loop variable
                        delete_rse_transfer_limits(rse_id=dest_rse_id,
                                                   activity='all_activities')
                        release_all_waiting_requests(rse_id=dest_rse_id)
                        record_counter(
                            'daemons.conveyor.throttler.delete_rse_transfer_limits.%s'
                            % (rse_name))
                elif dest_rse_release_strategy == 'fifo':
                    for activity in result_dict[dest_rse_id]['activities']:
                        threshold = result_dict[dest_rse_id]['activities'][activity]['threshold']
                        transfer = result_dict[dest_rse_id]['activities'][activity]['transfer']
                        waiting = result_dict[dest_rse_id]['activities'][activity]['waiting']
                        if waiting:
                            logging.debug(
                                "Request status for %s at %s: %s" %
                                (activity, rse_name,
                                 result_dict[dest_rse_id]['activities'][activity]))
                        if threshold is None:
                            logging.debug(
                                "Throttler remove limits(threshold: %s) and release all waiting requests for activity %s, rse_id %s"
                                % (threshold, activity, dest_rse_id))
                            delete_rse_transfer_limits(rse_id=dest_rse_id,
                                                       activity=activity)
                            release_all_waiting_requests(rse_id=dest_rse_id,
                                                         activity=activity)
                            record_counter(
                                'daemons.conveyor.throttler.delete_rse_transfer_limits.%s.%s'
                                % (activity, rse_name))
                        elif transfer + waiting > threshold:
                            logging.debug(
                                "Throttler set limits for activity %s, rse %s"
                                % (activity, rse_name))
                            set_rse_transfer_limits(rse_id=dest_rse_id,
                                                    activity=activity,
                                                    max_transfers=threshold,
                                                    transfers=transfer,
                                                    waitings=waiting)
                            record_gauge(
                                'daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.max_transfers'
                                % (activity, rse_name), threshold)
                            record_gauge(
                                'daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.transfers'
                                % (activity, rse_name), transfer)
                            record_gauge(
                                'daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.waitings'
                                % (activity, rse_name), waiting)
                            if transfer < 0.8 * threshold:
                                # release requests on account
                                accounts = result_dict[dest_rse_id]['activities'][activity]['accounts']
                                nr_accounts = len(accounts)
                                if nr_accounts < 1:
                                    nr_accounts = 1
                                to_release = threshold - transfer
                                threshold_per_account = math.ceil(threshold / nr_accounts)
                                to_release_per_account = math.ceil(to_release / nr_accounts)
                                for account in accounts:
                                    if nr_accounts == 1:
                                        logging.debug(
                                            "Throttler release %s waiting requests for activity %s, rse %s, account %s "
                                            % (to_release, activity, rse_name,
                                               account))
                                        release_waiting_requests_fifo(
                                            rse_id=dest_rse_id,
                                            activity=activity,
                                            account=account,
                                            count=to_release)
                                        record_gauge(
                                            'daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s'
                                            % (activity, rse_name, account),
                                            to_release)
                                    elif accounts[account]['transfer'] > threshold_per_account:
                                        logging.debug(
                                            "Throttler will not release %s waiting requests for activity %s, rse %s, account %s: it queued more transfers than its share"
                                            % (accounts[account]['waiting'],
                                               activity, rse_name, account))
                                        nr_accounts -= 1
                                        to_release_per_account = math.ceil(
                                            to_release / nr_accounts)
                                    elif accounts[account]['waiting'] < to_release_per_account:
                                        logging.debug(
                                            "Throttler release %s waiting requests for activity %s, rse %s, account %s "
                                            % (accounts[account]['waiting'],
                                               activity, rse_name, account))
                                        release_waiting_requests_fifo(
                                            rse_id=dest_rse_id,
                                            activity=activity,
                                            account=account,
                                            count=accounts[account]['waiting'])
                                        record_gauge(
                                            'daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s'
                                            % (activity, rse_name, account),
                                            accounts[account]['waiting'])
                                        to_release = to_release - accounts[account]['waiting']
                                        nr_accounts -= 1
                                        to_release_per_account = math.ceil(
                                            to_release / nr_accounts)
                                    else:
                                        logging.debug(
                                            "Throttler release %s waiting requests for activity %s, rse %s, account %s "
                                            % (to_release_per_account,
                                               activity, rse_name, account))
                                        release_waiting_requests_fifo(
                                            rse_id=dest_rse_id,
                                            activity=activity,
                                            account=account,
                                            count=to_release_per_account)
                                        record_gauge(
                                            'daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s'
                                            % (activity, rse_name, account),
                                            to_release_per_account)
                                        to_release = to_release - to_release_per_account
                                        nr_accounts -= 1
                            else:
                                logging.debug(
                                    "Throttler has done nothing for activity %s on rse %s (transfer > 0.8 * threshold)"
                                    % (activity, rse_name))

                        elif waiting > 0:
                            logging.debug(
                                "Throttler remove limits(threshold: %s) and release all waiting requests for activity %s, rse %s"
                                % (threshold, activity, rse_name))
                            delete_rse_transfer_limits(rse_id=dest_rse_id,
                                                       activity=activity)
                            release_all_waiting_requests(rse_id=dest_rse_id,
                                                         activity=activity)
                            record_counter(
                                'daemons.conveyor.throttler.delete_rse_transfer_limits.%s.%s'
                                % (activity, rse_name))
    except Exception:
        logging.critical("Failed to schedule requests, error: %s" %
                         (traceback.format_exc()))
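
The 'fifo' branch above shares the free slots (threshold - transfer) evenly between accounts: an account that already queued more transfers than its share gets nothing, an account with fewer waiting requests than its share is drained completely, and unused quota is redistributed among the remaining accounts. The simplified standalone sketch below mirrors that arithmetic; release_per_account is a hypothetical helper, not Rucio API, and the real code applies the plan through release_waiting_requests_fifo.

import math


def release_per_account(accounts, threshold, transfer):
    """Return {account: waiting requests to release}, mirroring the loop above.

    accounts maps an account name to {'transfer': int, 'waiting': int}.
    """
    to_release = threshold - transfer
    nr_accounts = max(len(accounts), 1)
    threshold_per_account = math.ceil(threshold / nr_accounts)
    to_release_per_account = math.ceil(to_release / nr_accounts)
    plan = {}
    for account, stats in accounts.items():
        if nr_accounts == 1:
            # Last remaining account takes whatever quota is left.
            plan[account] = to_release
        elif stats['transfer'] > threshold_per_account:
            # Already queued more than its share: release nothing and
            # redistribute its quota among the remaining accounts.
            plan[account] = 0
            nr_accounts -= 1
            to_release_per_account = math.ceil(to_release / nr_accounts)
        elif stats['waiting'] < to_release_per_account:
            # Fewer waiting requests than its share: drain it and hand the
            # unused quota to the remaining accounts.
            plan[account] = stats['waiting']
            to_release -= stats['waiting']
            nr_accounts -= 1
            to_release_per_account = math.ceil(to_release / nr_accounts)
        else:
            # Give the account exactly its even share.
            plan[account] = to_release_per_account
            to_release -= to_release_per_account
            nr_accounts -= 1
    return plan


# Example: 10 free slots (threshold 12, 2 active) shared by three accounts.
print(release_per_account(
    {'alice': {'transfer': 9, 'waiting': 5},   # over its share, gets nothing
     'bob': {'transfer': 0, 'waiting': 2},     # drained: fewer waiting than its share
     'carol': {'transfer': 1, 'waiting': 8}},  # last account takes the remaining quota
    threshold=12, transfer=2))
# -> {'alice': 0, 'bob': 2, 'carol': 8}
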
Example 3
    def test_release_waiting_requests_grouped_fifo(self):
        """ REQUEST (CORE): release waiting requests based on grouped FIFO. """
        if self.dialect == 'mysql':
            return True

        # set max_volume to 0 to check first without releasing extra requests
        set_rse_transfer_limits(self.dest_rse_id,
                                self.all_activities,
                                volume=0,
                                max_transfers=1,
                                session=self.db_session)

        # one request with an unattached DID -> one request should be released
        self.db_session.query(models.Request).delete()
        self.db_session.commit()
        name = generate_uuid()
        add_replica(self.source_rse_id,
                    self.scope,
                    name,
                    1,
                    self.account,
                    session=self.db_session)
        requests = [{
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name,
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'retry_count': 1,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }]
        queue_requests(requests, session=self.db_session)
        release_waiting_requests_grouped_fifo(self.dest_rse_id,
                                              count=1,
                                              session=self.db_session)
        request = get_request_by_did(self.scope,
                                     name,
                                     self.dest_rse_id,
                                     session=self.db_session)
        assert_equal(request['state'], constants.RequestState.QUEUED)

        # one request with an attached DID -> one request should be released
        self.db_session.query(models.Request).delete()
        self.db_session.commit()
        name = generate_uuid()
        dataset_name = generate_uuid()
        add_replica(self.source_rse_id,
                    self.scope,
                    name,
                    1,
                    self.account,
                    session=self.db_session)
        add_did(self.scope,
                dataset_name,
                constants.DIDType.DATASET,
                self.account,
                session=self.db_session)
        attach_dids(self.scope,
                    dataset_name, [{
                        'name': name,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)
        requests = [{
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name,
            'rule_id': generate_uuid(),
            'retry_count': 1,
            'scope': self.scope,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }]
        queue_requests(requests, session=self.db_session)
        release_waiting_requests_grouped_fifo(self.dest_rse_id,
                                              count=1,
                                              session=self.db_session)
        request = get_request_by_did(self.scope,
                                     name,
                                     self.dest_rse_id,
                                     session=self.db_session)
        assert_equal(request['state'], constants.RequestState.QUEUED)

        # five requests with different requested_at and multiple attachments per collection -> release only one request -> two requests of one collection should be released
        self.db_session.query(models.Request).delete()
        self.db_session.commit()
        name1 = generate_uuid()
        name2 = generate_uuid()
        name3 = generate_uuid()
        name4 = generate_uuid()
        name5 = generate_uuid()
        dataset_1_name = generate_uuid()
        add_did(self.scope,
                dataset_1_name,
                constants.DIDType.DATASET,
                self.account,
                session=self.db_session)
        dataset_2_name = generate_uuid()
        add_did(self.scope,
                dataset_2_name,
                constants.DIDType.DATASET,
                self.account,
                session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name1,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name2,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name3,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name4,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name5,
                    1,
                    self.account,
                    session=self.db_session)
        attach_dids(self.scope,
                    dataset_1_name, [{
                        'name': name1,
                        'scope': self.scope
                    }, {
                        'name': name2,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)
        attach_dids(self.scope,
                    dataset_2_name, [{
                        'name': name3,
                        'scope': self.scope
                    }, {
                        'name': name4,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)

        requests = [{
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name1,
            'scope': self.scope,
            'retry_count': 1,
            'rule_id': generate_uuid(),
            'requested_at': datetime.now().replace(year=2000),
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }, {
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name2,
            'requested_at': datetime.now().replace(year=2020),
            'rule_id': generate_uuid(),
            'scope': self.scope,
            'retry_count': 1,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }, {
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name3,
            'requested_at': datetime.now().replace(year=2015),
            'retry_count': 1,
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }, {
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name4,
            'requested_at': datetime.now().replace(year=2010),
            'retry_count': 1,
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }, {
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name5,
            'retry_count': 1,
            'requested_at': datetime.now().replace(year=2018),
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }]
        queue_requests(requests, session=self.db_session)
        release_waiting_requests_grouped_fifo(self.dest_rse_id,
                                              count=1,
                                              session=self.db_session)
        request_1 = get_request_by_did(self.scope,
                                       name1,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_1['state'], constants.RequestState.QUEUED)
        request_2 = get_request_by_did(self.scope,
                                       name2,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_2['state'], constants.RequestState.QUEUED)
        request_3 = get_request_by_did(self.scope,
                                       name3,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_3['state'], constants.RequestState.WAITING)
        request_4 = get_request_by_did(self.scope,
                                       name4,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_4['state'], constants.RequestState.WAITING)
        request_5 = get_request_by_did(self.scope,
                                       name5,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_5['state'], constants.RequestState.WAITING)

        # with maximal volume check -> release one request -> three requests should be released because of attachments and free volume space
        self.db_session.query(models.Request).delete()
        self.db_session.commit()
        name1 = generate_uuid()
        name2 = generate_uuid()
        name3 = generate_uuid()
        name4 = generate_uuid()
        dataset_1_name = generate_uuid()
        add_did(self.scope,
                dataset_1_name,
                constants.DIDType.DATASET,
                self.account,
                session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name1,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name2,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name3,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name4,
                    1,
                    self.account,
                    session=self.db_session)
        attach_dids(self.scope,
                    dataset_1_name, [{
                        'name': name1,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)
        attach_dids(self.scope,
                    dataset_1_name, [{
                        'name': name2,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)
        set_rse_transfer_limits(self.dest_rse_id,
                                self.all_activities,
                                volume=10,
                                max_transfers=1,
                                session=self.db_session)
        requests = [
            {
                'source_rse_id': self.source_rse_id,
                'dest_rse_id': self.dest_rse_id,
                'request_type': constants.RequestType.TRANSFER,
                'request_id': generate_uuid(),
                'name': name1,
                'bytes': 1,
                'scope': self.scope,
                'retry_count': 1,
                'rule_id': generate_uuid(),
                'requested_at': datetime.now().replace(year=2000),
                'attributes': {
                    'activity': self.user_activity,
                    'bytes': 1,
                    'md5': '',
                    'adler32': ''
                }
            },
            {
                'source_rse_id': self.source_rse_id,
                'dest_rse_id': self.dest_rse_id,
                'request_type': constants.RequestType.TRANSFER,
                'request_id': generate_uuid(),
                'name': name2,
                'bytes': 2,
                'requested_at': datetime.now().replace(year=2020),
                'rule_id': generate_uuid(),
                'scope': self.scope,
                'retry_count': 1,
                'attributes': {
                    'activity': self.user_activity,
                    'bytes': 2,
                    'md5': '',
                    'adler32': ''
                }
            },
            {
                'source_rse_id': self.source_rse_id,
                'dest_rse_id': self.dest_rse_id,
                'request_type': constants.RequestType.TRANSFER,
                'request_id': generate_uuid(),
                'name': name3,
                'bytes': 3,
                'requested_at': datetime.now().replace(
                    year=2021
                ),  # requested after the request below but small enough for max_volume check
                'rule_id': generate_uuid(),
                'scope': self.scope,
                'retry_count': 1,
                'attributes': {
                    'activity': self.user_activity,
                    'bytes': 3,
                    'md5': '',
                    'adler32': ''
                }
            },
            {
                'source_rse_id': self.source_rse_id,
                'dest_rse_id': self.dest_rse_id,
                'request_type': constants.RequestType.TRANSFER,
                'request_id': generate_uuid(),
                'name': name4,
                'bytes': 3000,
                'requested_at': datetime.now().replace(year=2020),
                'rule_id': generate_uuid(),
                'scope': self.scope,
                'retry_count': 1,
                'attributes': {
                    'activity': self.user_activity,
                    'bytes': 3000,
                    'md5': '',
                    'adler32': ''
                }
            }
        ]

        queue_requests(requests, session=self.db_session)
        amount_updated_requests = release_waiting_requests_grouped_fifo(
            self.dest_rse_id, count=1, session=self.db_session)
        assert_equal(amount_updated_requests, 3)
        # released because it got requested first
        request_1 = get_request_by_did(self.scope,
                                       name1,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_1['state'], constants.RequestState.QUEUED)
        # released because the DID is attached to the same dataset
        request_2 = get_request_by_did(self.scope,
                                       name2,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_2['state'], constants.RequestState.QUEUED)
        # released because of available volume
        request_3 = get_request_by_did(self.scope,
                                       name3,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_3['state'], constants.RequestState.QUEUED)
        # still waiting because there is no free volume
        request_4 = get_request_by_did(self.scope,
                                       name4,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_4['state'], constants.RequestState.WAITING)

        # with maximal volume check -> release one request -> two requests should be released because of attachments
        self.db_session.query(models.Request).delete()
        self.db_session.commit()
        name1 = generate_uuid()
        name2 = generate_uuid()
        name3 = generate_uuid()
        name4 = generate_uuid()
        dataset_1_name = generate_uuid()
        add_did(self.scope,
                dataset_1_name,
                constants.DIDType.DATASET,
                self.account,
                session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name1,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name2,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name3,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name4,
                    1,
                    self.account,
                    session=self.db_session)
        attach_dids(self.scope,
                    dataset_1_name, [{
                        'name': name1,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)
        attach_dids(self.scope,
                    dataset_1_name, [{
                        'name': name2,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)
        set_rse_transfer_limits(self.dest_rse_id,
                                self.all_activities,
                                volume=5,
                                max_transfers=1,
                                session=self.db_session)
        request = models.Request(dest_rse_id=self.dest_rse_id,
                                 bytes=2,
                                 activity=self.all_activities,
                                 state=constants.RequestState.SUBMITTED)
        request.save(session=self.db_session)
        requests = [{
            'dest_rse_id': self.dest_rse_id,
            'source_rse_id': self.source_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name1,
            'bytes': 1,
            'scope': self.scope,
            'retry_count': 1,
            'rule_id': generate_uuid(),
            'requested_at': datetime.now().replace(year=2000),
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }, {
            'dest_rse_id': self.dest_rse_id,
            'source_rse_id': self.source_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name2,
            'bytes': 2,
            'requested_at': datetime.now().replace(year=2020),
            'rule_id': generate_uuid(),
            'scope': self.scope,
            'retry_count': 1,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 2,
                'md5': '',
                'adler32': ''
            }
        }, {
            'dest_rse_id': self.dest_rse_id,
            'source_rse_id': self.source_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name3,
            'bytes': 1,
            'requested_at': datetime.now().replace(year=2020),
            'rule_id': generate_uuid(),
            'scope': self.scope,
            'retry_count': 1,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }, {
            'dest_rse_id': self.dest_rse_id,
            'source_rse_id': self.source_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name4,
            'bytes': 1,
            'requested_at': datetime.now().replace(year=2020),
            'rule_id': generate_uuid(),
            'scope': self.scope,
            'retry_count': 1,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }]

        queue_requests(requests, session=self.db_session)
        release_waiting_requests_grouped_fifo(self.dest_rse_id,
                                              count=1,
                                              session=self.db_session)
        # released because it got requested first
        request_1 = get_request_by_did(self.scope,
                                       name1,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_1['state'], constants.RequestState.QUEUED)
        # released because the DID is attached to the same dataset
        request_2 = get_request_by_did(self.scope,
                                       name2,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_2['state'], constants.RequestState.QUEUED)
        # still waiting because there is no free volume after releasing the two requests above
        request_3 = get_request_by_did(self.scope,
                                       name3,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_3['state'], constants.RequestState.WAITING)
        request_4 = get_request_by_did(self.scope,
                                       name4,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_4['state'], constants.RequestState.WAITING)
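
For orientation, the behaviour this test asserts can be summarised as follows: waiting requests are grouped by their parent dataset (an unattached file forms its own group), whole groups are released oldest-first until the requested count is covered, and any remaining volume budget may release further groups. The toy model below reproduces the assertions of this test under those assumptions; grouped_fifo_release and its inputs are illustrative only, not the actual implementation of release_waiting_requests_grouped_fifo.

def grouped_fifo_release(waiting, count, volume=None, used_volume=0):
    """Pick waiting requests to release.

    waiting is a list of dicts with 'name', 'dataset', 'bytes', 'requested_at';
    'dataset' is None for an unattached file.
    """
    # Group by dataset (unattached files are their own group) and order groups
    # by their oldest request: FIFO on groups.
    groups = {}
    for req in waiting:
        key = req['dataset'] if req['dataset'] else ('file', req['name'])
        groups.setdefault(key, []).append(req)
    ordered = sorted(groups.values(), key=lambda grp: min(r['requested_at'] for r in grp))

    released, n_released = [], 0
    budget = None if volume is None else volume - used_volume
    for grp in ordered:
        grp_bytes = sum(r['bytes'] for r in grp)
        if n_released < count:
            # Release the whole group, even if it overshoots the requested count.
            released.extend(grp)
            n_released += len(grp)
            if budget is not None:
                budget -= grp_bytes
        elif budget is not None and grp_bytes <= budget:
            # Spend leftover volume on further groups.
            released.extend(grp)
            budget -= grp_bytes
    return [r['name'] for r in released]


# Mirrors the "maximal volume check" case: two files of the oldest dataset plus
# one small single file fit a volume of 10, the 3000-byte file keeps waiting.
print(grouped_fifo_release(
    [{'name': 'f1', 'dataset': 'dsA', 'bytes': 1, 'requested_at': 2000},
     {'name': 'f2', 'dataset': 'dsA', 'bytes': 2, 'requested_at': 2020},
     {'name': 'f3', 'dataset': None, 'bytes': 3, 'requested_at': 2021},
     {'name': 'f4', 'dataset': None, 'bytes': 3000, 'requested_at': 2020}],
    count=1, volume=10))
# -> ['f1', 'f2', 'f3']
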