Code example #1
File: test_throttler.py  Project: arisfkiaras/rucio
    def setUpClass(cls):
        cls.db_session = session.get_session()
        cls.dest_rse = 'MOCK'
        cls.source_rse = 'MOCK4'
        cls.dest_rse_id = get_rse_id(cls.dest_rse)
        cls.source_rse_id = get_rse_id(cls.source_rse)
        cls.scope = InternalScope('mock')
        cls.account = InternalAccount('root')
        cls.user_activity = 'User Subscription'
        cls.all_activities = 'all_activities'
        set_rse_transfer_limits(cls.dest_rse_id, cls.user_activity, max_transfers=1, session=cls.db_session)
        set('throttler_release_strategy', 'dest_%s' % cls.dest_rse_id, 'fifo', session=cls.db_session)
Code example #2
def dest_throttler(db_session, mock_request):
    config.set('throttler', 'mode', 'DEST_PER_ACT', session=db_session)
    set_rse_transfer_limits(mock_request.dest_rse_id,
                            activity=mock_request.activity,
                            max_transfers=1,
                            strategy='fifo',
                            session=db_session)
    db_session.commit()

    yield

    db_session.query(models.RSETransferLimit).filter_by(
        rse_id=mock_request.dest_rse_id).delete()
    config.remove_option('throttler', 'mode', session=db_session)
    db_session.commit()
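
For reference, a minimal self-contained sketch of the same yield-fixture setup/teardown pattern used by dest_throttler above; the dicts below only stand in for Rucio's config table and RSETransferLimit model and are not Rucio APIs.

import pytest

# Stand-ins for Rucio's config table and transfer-limit store (illustrative only).
CONFIG = {}
TRANSFER_LIMITS = {}

@pytest.fixture
def dest_throttler_sketch():
    # Setup: switch the throttler on and install a per-destination limit.
    CONFIG[('throttler', 'mode')] = 'DEST_PER_ACT'
    TRANSFER_LIMITS['dest-rse'] = {'activity': 'User Subscription',
                                   'max_transfers': 1,
                                   'strategy': 'fifo'}
    yield
    # Teardown: undo both changes so other tests start from a clean state.
    TRANSFER_LIMITS.pop('dest-rse', None)
    CONFIG.pop(('throttler', 'mode'), None)

def test_uses_dest_throttler(dest_throttler_sketch):
    assert CONFIG[('throttler', 'mode')] == 'DEST_PER_ACT'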
Code example #3
File: test_throttler.py  Project: ijjorama/rucio
    def setUpClass(cls):
        if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
            cls.vo = {'vo': 'tst'}
        else:
            cls.vo = {}

        cls.db_session = session.get_session()
        cls.dest_rse = 'MOCK'
        cls.source_rse = 'MOCK4'
        cls.dest_rse_id = get_rse_id(cls.dest_rse, **cls.vo)
        cls.source_rse_id = get_rse_id(cls.source_rse, **cls.vo)
        cls.scope = InternalScope('mock', **cls.vo)
        cls.account = InternalAccount('root', **cls.vo)
        cls.user_activity = 'User Subscription'
        cls.all_activities = 'all_activities'
        set_rse_transfer_limits(cls.dest_rse_id, cls.user_activity, max_transfers=1, session=cls.db_session)
        set('throttler_release_strategy', 'dest_%s' % cls.dest_rse_id, 'fifo', session=cls.db_session)
Code example #4
    def setUp(self):
        self.db_session.query(models.Request).delete()
        self.db_session.commit()
        # set transfer limits to put requests in waiting state
        set_rse_transfer_limits(self.dest_rse_id, self.user_activity, max_transfers=1, session=self.db_session)
        set_rse_transfer_limits(self.dest_rse_id, self.all_activities, max_transfers=1, session=self.db_session)
        set_rse_transfer_limits(self.dest_rse_id, 'ignore', max_transfers=1, session=self.db_session)
Code example #5
def __release_per_activity(stats, direction, rse_name, rse_id, logger, session):
    """
    Release requests per activity.

    :param stats:          Request statistics
    :param direction:      String indicating whether the statistics are based on source or destination RSEs.
    :param rse_name:       RSE name.
    :param rse_id:         RSE id.
    :param logger:         Logging callable, invoked as logger(level, message).
    :param session:        Database session in use.
    """
    for activity in stats['activities']:
        threshold = stats['activities'][activity]['threshold']
        transfer = stats['activities'][activity]['transfer']
        waiting = stats['activities'][activity]['waiting']
        if waiting:
            logger(logging.DEBUG, "Request status for %s at %s: %s" % (activity, rse_name,
                                                                       stats['activities'][activity]))
            if threshold is None:
                logger(logging.DEBUG, "Throttler remove limits(threshold: %s) and release all waiting requests for activity %s, rse_id %s" % (threshold, activity, rse_id))
                delete_rse_transfer_limits(rse_id, activity=activity, session=session)
                release_all_waiting_requests(rse_id, activity=activity, direction=direction, session=session)
                record_counter('daemons.conveyor.throttler.delete_rse_transfer_limits.{activity}.{rse}', labels={'activity': activity, 'rse': rse_name})
            elif transfer + waiting > threshold:
                logger(logging.DEBUG, "Throttler set limits for activity %s, rse %s" % (activity, rse_name))
                set_rse_transfer_limits(rse_id, activity=activity, max_transfers=threshold, transfers=transfer, waitings=waiting, session=session)
                record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.{activity}.{rse}.{limit_attr}', threshold, labels={'activity': activity, 'rse': rse_name, 'limit_attr': 'max_transfers'})
                record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.{activity}.{rse}.{limit_attr}', transfer, labels={'activity': activity, 'rse': rse_name, 'limit_attr': 'transfers'})
                record_gauge('daemons.conveyor.throttler.set_rse_transfer_limits.{activity}.{rse}.{limit_attr}', waiting, labels={'activity': activity, 'rse': rse_name, 'limit_attr': 'waiting'})
                if transfer < 0.8 * threshold:
                    # release requests on account
                    nr_accounts = len(stats['activities'][activity]['accounts'])
                    if nr_accounts < 1:
                        nr_accounts = 1
                    to_release = threshold - transfer
                    threshold_per_account = math.ceil(threshold / nr_accounts)
                    to_release_per_account = math.ceil(to_release / nr_accounts)
                    accounts = stats['activities'][activity]['accounts']
                    for account in accounts:
                        if nr_accounts == 1:
                            logger(logging.DEBUG, "Throttler release %s waiting requests for activity %s, rse %s, account %s " % (to_release, activity, rse_name, account))
                            release_waiting_requests_fifo(rse_id, activity=activity, account=account, count=to_release, direction=direction, session=session)
                            record_gauge('daemons.conveyor.throttler.release_waiting_requests.{activity}.{rse}.{account}', to_release, labels={'activity': activity, 'rse': rse_name, 'account': account})
                        elif accounts[account]['transfer'] > threshold_per_account:
                            logger(logging.DEBUG, "Throttler will not release  %s waiting requests for activity %s, rse %s, account %s: It queued more transfers than its share " %
                                   (accounts[account]['waiting'], activity, rse_name, account))
                            nr_accounts -= 1
                            to_release_per_account = math.ceil(to_release / nr_accounts)
                        elif accounts[account]['waiting'] < to_release_per_account:
                            logger(logging.DEBUG, "Throttler release %s waiting requests for activity %s, rse %s, account %s " % (accounts[account]['waiting'], activity, rse_name, account))
                            release_waiting_requests_fifo(rse_id, activity=activity, account=account, count=accounts[account]['waiting'], direction=direction, session=session)
                            record_gauge('daemons.conveyor.throttler.release_waiting_requests.{activity}.{rse}.{account}', accounts[account]['waiting'], labels={'activity': activity, 'rse': rse_name, 'account': account})
                            to_release = to_release - accounts[account]['waiting']
                            nr_accounts -= 1
                            to_release_per_account = math.ceil(to_release / nr_accounts)
                        else:
                            logger(logging.DEBUG, "Throttler release %s waiting requests for activity %s, rse %s, account %s " % (to_release_per_account, activity, rse_name, account))
                            release_waiting_requests_fifo(rse_id, activity=activity, account=account, count=to_release_per_account, direction=direction, session=session)
                            record_gauge('daemons.conveyor.throttler.release_waiting_requests.{activity}.{rse}.{account}', to_release_per_account, labels={'activity': activity, 'rse': rse_name, 'account': account})
                            to_release = to_release - to_release_per_account
                            nr_accounts -= 1
                else:
                    logger(logging.DEBUG, "Throttler has done nothing for activity %s on rse %s (transfer > 0.8 * threshold)" % (activity, rse_name))
            elif waiting > 0:
                logger(logging.DEBUG, "Throttler remove limits(threshold: %s) and release all waiting requests for activity %s, rse %s" % (threshold, activity, rse_name))
                delete_rse_transfer_limits(rse_id, activity=activity, session=session)
                release_all_waiting_requests(rse_id, activity=activity, direction=direction, session=session)
                record_counter('daemons.conveyor.throttler.delete_rse_transfer_limits.{activity}.{rse}', labels={'activity': activity, 'rse': rse_name})
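
The per-account arithmetic above (release at most threshold - transfer slots, capped per account, and skip accounts that already queued more than their share) can be isolated into a short self-contained sketch; the dict layout mirrors stats['activities'][activity]['accounts'], but nothing here calls Rucio.

import math

def plan_release(threshold, transfer, accounts):
    """Illustrative sketch of the fair-share split used above; returns
    {account: number_of_waiting_requests_to_release}."""
    plan = {}
    nr_accounts = max(len(accounts), 1)
    to_release = threshold - transfer
    threshold_per_account = math.ceil(threshold / nr_accounts)
    to_release_per_account = math.ceil(to_release / nr_accounts)
    for account, counts in accounts.items():
        if nr_accounts == 1:
            # Last account left: it gets everything that is still releasable.
            plan[account] = to_release
        elif counts['transfer'] > threshold_per_account:
            # Account already queued more than its share: release nothing for it.
            nr_accounts -= 1
            to_release_per_account = math.ceil(to_release / nr_accounts)
        elif counts['waiting'] < to_release_per_account:
            # Account waits for less than its share: release all of its requests.
            plan[account] = counts['waiting']
            to_release -= counts['waiting']
            nr_accounts -= 1
            to_release_per_account = math.ceil(to_release / nr_accounts)
        else:
            plan[account] = to_release_per_account
            to_release -= to_release_per_account
            nr_accounts -= 1
    return plan

# threshold 10, 2 transfers already active, two accounts with waiting requests:
print(plan_release(10, 2, {'alice': {'transfer': 1, 'waiting': 2},
                           'bob': {'transfer': 0, 'waiting': 9}}))
# {'alice': 2, 'bob': 6}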
Code example #6
def import_data(data, session=None):
    """
    Import data to add and update records in Rucio.

    :param data: data to be imported as dictionary.
    :param session: database session in use.
    """
    # RSEs
    rses = data.get('rses')
    if rses:
        for rse in rses:
            protocols = rse.get('protocols')
            if protocols:
                protocols = protocols.get('protocols')
                del rse['protocols']
            rse_name = rse['rse']
            del rse['rse']
            if not rse_module.rse_exists(rse_name, session=session):
                rse_module.add_rse(rse_name,
                                   deterministic=rse.get('deterministic'),
                                   volatile=rse.get('volatile'),
                                   city=rse.get('city'),
                                   region_code=rse.get('region_code'),
                                   country_name=rse.get('country_name'),
                                   staging_area=rse.get('staging_area'),
                                   continent=rse.get('continent'),
                                   time_zone=rse.get('time_zone'),
                                   ISP=rse.get('ISP'),
                                   rse_type=rse.get('rse_type'),
                                   latitude=rse.get('latitude'),
                                   longitude=rse.get('longitude'),
                                   ASN=rse.get('ASN'),
                                   availability=rse.get('availability'),
                                   session=session)
            else:
                rse_module.update_rse(rse_name, rse, session=session)

            # Protocols
            if protocols:
                old_protocols = rse_module.get_rse_protocols(rse=rse_name,
                                                             session=session)
                for protocol in protocols:
                    scheme = protocol.get('scheme')
                    hostname = protocol.get('hostname')
                    port = protocol.get('port')
                    intersection = [
                        old_protocol
                        for old_protocol in old_protocols['protocols']
                        if old_protocol['scheme'] == scheme
                        and old_protocol['hostname'] == hostname
                        and old_protocol['port'] == port
                    ]
                    if intersection:
                        del protocol['scheme']
                        del protocol['hostname']
                        del protocol['port']
                        rse_module.update_protocols(rse=rse_name,
                                                    scheme=scheme,
                                                    data=protocol,
                                                    hostname=hostname,
                                                    port=port,
                                                    session=session)
                    else:
                        rse_module.add_protocol(rse=rse_name,
                                                parameter=protocol,
                                                session=session)

            # Limits
            limits = rse.get('limits')
            if limits:
                old_limits = rse_module.get_rse_limits(rse=rse_name,
                                                       session=session)
                for limit in limits:
                    if limit in old_limits:
                        rse_module.delete_rse_limit(rse=rse_name,
                                                    name=limit,
                                                    session=session)
                    rse_module.set_rse_limits(rse=rse_name,
                                              name=limit,
                                              value=limits[limit],
                                              session=session)

            # Transfer limits
            transfer_limits = rse.get('transfer_limits')
            if transfer_limits:
                for limit in transfer_limits:
                    old_transfer_limits = rse_module.get_rse_transfer_limits(
                        rse=rse_name, activity=limit, session=session)
                    if limit in old_transfer_limits:
                        rse_module.delete_rse_transfer_limits(rse=rse_name,
                                                              activity=limit,
                                                              session=session)
                    # dict.items() is not subscriptable in Python 3; take the first value explicitly
                    max_transfers = next(iter(transfer_limits[limit].values()))['max_transfers']
                    rse_module.set_rse_transfer_limits(
                        rse=rse_name,
                        activity=limit,
                        max_transfers=max_transfers,
                        session=session)

            # Attributes
            attributes = rse.get('attributes')
            if attributes:
                old_attributes = rse_module.list_rse_attributes(
                    rse=rse_name, session=session)
                for attr in attributes:
                    if attr in old_attributes:
                        rse_module.del_rse_attribute(rse=rse_name,
                                                     key=attr,
                                                     session=session)
                    rse_module.add_rse_attribute(rse=rse_name,
                                                 key=attr,
                                                 value=attributes[attr],
                                                 session=session)

    # Distances
    distances = data.get('distances')
    if distances:
        for src_rse_name in distances:
            src = rse_module.get_rse_id(src_rse_name, session=session)
            for dest_rse_name in distances[src_rse_name]:
                dest = rse_module.get_rse_id(dest_rse_name, session=session)
                distance = distances[src_rse_name][dest_rse_name]
                del distance['src_rse_id']
                del distance['dest_rse_id']

                old_distance = distance_module.get_distances(src_rse_id=src,
                                                             dest_rse_id=dest,
                                                             session=session)
                if old_distance:
                    distance_module.update_distances(src_rse_id=src,
                                                     dest_rse_id=dest,
                                                     parameters=distance,
                                                     session=session)
                else:
                    distance_module.add_distance(
                        src_rse_id=src,
                        dest_rse_id=dest,
                        ranking=distance.get('ranking'),
                        agis_distance=distance.get('agis_distance'),
                        geoip_distance=distance.get('geoip_distance'),
                        active=distance.get('active'),
                        submitted=distance.get('submitted'),
                        transfer_speed=distance.get('transfer_speed'),
                        finished=distance.get('finished'),
                        failed=distance.get('failed'),
                        session=session)
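
As an orientation aid, a rough sketch of the dictionary shape import_data consumes, reconstructed from the reads above; every concrete value below is a placeholder, not taken from Rucio.

data = {
    'rses': [{
        'rse': 'MOCK',                          # popped and used as the RSE name
        'deterministic': True,
        'volatile': False,
        'rse_type': 'DISK',
        'protocols': {'protocols': [            # nested list, matched on scheme/hostname/port
            {'scheme': 'srm', 'hostname': 'mock.example.org', 'port': 8443}]},
        'limits': {'MaxBeingDeletedFiles': 1000},
        'transfer_limits': {                    # per activity; first value must carry max_transfers
            'User Subscription': {'some-key': {'max_transfers': 10}}},
        'attributes': {'fts': 'https://fts.example.org:8446'},
    }],
    'distances': {                              # src RSE name -> dest RSE name -> parameters
        'MOCK': {'MOCK2': {'src_rse_id': '', 'dest_rse_id': '', 'ranking': 1}},
    },
}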
Code example #7
File: test_throttler.py  Project: arisfkiaras/rucio
    def test_throttler_grouped_fifo_subset(self):
        """ THROTTLER (CLIENTS): throttler release subset of waiting requests (grouped fifo). """
        set_rse_transfer_limits(self.dest_rse_id, self.all_activities, volume=10, max_transfers=1, session=self.db_session)
        set('throttler', '%s,%s' % (self.all_activities, self.dest_rse), 1, session=self.db_session)  # threshold used by throttler
        name1 = generate_uuid()
        name2 = generate_uuid()
        name3 = generate_uuid()
        name4 = generate_uuid()
        dataset_1_name = generate_uuid()
        add_did(self.scope, dataset_1_name, constants.DIDType.DATASET, self.account, session=self.db_session)
        add_replica(self.source_rse_id, self.scope, name1, 1, self.account, session=self.db_session)
        add_replica(self.source_rse_id, self.scope, name2, 1, self.account, session=self.db_session)
        add_replica(self.source_rse_id, self.scope, name3, 1, self.account, session=self.db_session)
        add_replica(self.source_rse_id, self.scope, name4, 1, self.account, session=self.db_session)
        attach_dids(self.scope, dataset_1_name, [{'name': name1, 'scope': self.scope}], self.account, session=self.db_session)
        attach_dids(self.scope, dataset_1_name, [{'name': name2, 'scope': self.scope}], self.account, session=self.db_session)
        requests = [{
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name1,
            'bytes': 1,
            'scope': self.scope,
            'retry_count': 1,
            'rule_id': generate_uuid(),
            'requested_at': datetime.now().replace(year=2000),
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }, {
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name2,
            'bytes': 2,
            'requested_at': datetime.now().replace(year=2020),
            'rule_id': generate_uuid(),
            'scope': self.scope,
            'retry_count': 1,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 2,
                'md5': '',
                'adler32': ''
            }
        }, {
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name3,
            'bytes': 3,
            'requested_at': datetime.now().replace(year=2021),  # requested after the request below but small enough for max_volume check
            'rule_id': generate_uuid(),
            'scope': self.scope,
            'retry_count': 1,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 3,
                'md5': '',
                'adler32': ''
            }
        }, {
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name4,
            'bytes': 3000,
            'requested_at': datetime.now().replace(year=2020),
            'rule_id': generate_uuid(),
            'scope': self.scope,
            'retry_count': 1,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 3000,
                'md5': '',
                'adler32': ''
            }
        }]

        queue_requests(requests, session=self.db_session)
        self.db_session.commit()
        throttler.run(once=True, sleep_time=1)
        # released because it got requested first
        request_1 = get_request_by_did(self.scope, name1, self.dest_rse_id)
        assert_equal(request_1['state'], constants.RequestState.QUEUED)
        # released because the DID is attached to the same dataset
        request_2 = get_request_by_did(self.scope, name2, self.dest_rse_id)
        assert_equal(request_2['state'], constants.RequestState.QUEUED)
        # released because of available volume
        request_3 = get_request_by_did(self.scope, name3, self.dest_rse_id)
        assert_equal(request_3['state'], constants.RequestState.QUEUED)
        # still waiting because there is no free volume
        request_4 = get_request_by_did(self.scope, name4, self.dest_rse_id)
        assert_equal(request_4['state'], constants.RequestState.WAITING)
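
To make the expected outcome above easier to follow, here is a tiny self-contained sketch of the grouped-FIFO idea (plain dicts, not the Rucio implementation): release the oldest waiting request, pull in the files attached to the same dataset, then keep releasing whatever still fits into the configured volume.

from datetime import datetime

def grouped_fifo_release(waiting, volume, count=1):
    """Illustrative sketch only; mirrors what the assertions above expect."""
    released, used = [], 0
    ordered = sorted(waiting, key=lambda r: r['requested_at'])
    seed = ordered[:count]                                   # the oldest `count` requests
    datasets = {r['dataset'] for r in seed if r['dataset']}  # their parent datasets
    for req in ordered:
        same_group = req in seed or req['dataset'] in datasets
        fits_volume = used + req['bytes'] <= volume
        if same_group or fits_volume:
            released.append(req['name'])
            used += req['bytes']
    return released

reqs = [
    {'name': 'n1', 'bytes': 1,    'dataset': 'ds1', 'requested_at': datetime(2000, 1, 1)},
    {'name': 'n2', 'bytes': 2,    'dataset': 'ds1', 'requested_at': datetime(2020, 1, 1)},
    {'name': 'n3', 'bytes': 3,    'dataset': None,  'requested_at': datetime(2021, 1, 1)},
    {'name': 'n4', 'bytes': 3000, 'dataset': None,  'requested_at': datetime(2020, 1, 1)},
]
print(grouped_fifo_release(reqs, volume=10))  # ['n1', 'n2', 'n3'] -- n4 keeps waiting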
Code example #8
File: test_request.py  Project: rak108/rucio
def test_queue_requests_state(vo, use_preparer):
    """ REQUEST (CORE): test queuing requests """

    if use_preparer == 'preparer enabled':
        use_preparer = True
    elif use_preparer == 'preparer disabled':
        use_preparer = False
    else:
        return pytest.xfail(reason=f'unknown test parameter use_preparer={use_preparer}')

    db_session = session.get_session()
    dest_rse = 'MOCK'
    dest_rse2 = 'MOCK2'
    source_rse = 'MOCK4'
    source_rse2 = 'MOCK5'
    dest_rse_id = get_rse_id(dest_rse, vo=vo)
    dest_rse_id2 = get_rse_id(dest_rse2, vo=vo)
    source_rse_id = get_rse_id(source_rse, vo=vo)
    source_rse_id2 = get_rse_id(source_rse2, vo=vo)
    scope = InternalScope('mock', vo=vo)
    account = InternalAccount('root', vo=vo)
    user_activity = 'User Subscription'
    config_set('conveyor', 'use_preparer', str(use_preparer))
    target_state = RequestState.PREPARING if use_preparer else RequestState.QUEUED

    name = generate_uuid()
    name2 = generate_uuid()
    name3 = generate_uuid()
    add_replica(source_rse_id, scope, name, 1, account, session=db_session)
    add_replica(source_rse_id2, scope, name2, 1, account, session=db_session)
    add_replica(source_rse_id, scope, name3, 1, account, session=db_session)

    set_rse_transfer_limits(dest_rse_id, user_activity, max_transfers=1, session=db_session)
    set_rse_transfer_limits(dest_rse_id2, user_activity, max_transfers=1, session=db_session)
    set_rse_transfer_limits(source_rse_id, user_activity, max_transfers=1, session=db_session)
    set_rse_transfer_limits(source_rse_id2, user_activity, max_transfers=1, session=db_session)

    requests = [{
        'dest_rse_id': dest_rse_id,
        'src_rse_id': source_rse_id,
        'request_type': RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name,
        'scope': scope,
        'rule_id': generate_uuid(),
        'retry_count': 1,
        'requested_at': datetime.now().replace(year=2015),
        'attributes': {
            'activity': user_activity,
            'bytes': 10,
            'md5': '',
            'adler32': ''
        }
    }, {
        'dest_rse_id': dest_rse_id,
        'src_rse_id': source_rse_id2,
        'request_type': RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name2,
        'scope': scope,
        'rule_id': generate_uuid(),
        'retry_count': 1,
        'requested_at': datetime.now().replace(year=2015),
        'attributes': {
            'activity': 'unknown',
            'bytes': 10,
            'md5': '',
            'adler32': ''
        }
    }, {
        'dest_rse_id': dest_rse_id2,
        'src_rse_id': source_rse_id,
        'request_type': RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name3,
        'scope': scope,
        'rule_id': generate_uuid(),
        'retry_count': 1,
        'requested_at': datetime.now().replace(year=2015),
        'attributes': {
            'activity': user_activity,
            'bytes': 10,
            'md5': '',
            'adler32': ''
        }
    }]
    try:
        queue_requests(requests, session=db_session)
        request = get_request_by_did(scope, name, dest_rse_id, session=db_session)
        assert request['state'] == target_state
        request = get_request_by_did(scope, name2, dest_rse_id, session=db_session)
        assert request['state'] == target_state
        request = get_request_by_did(scope, name3, dest_rse_id2, session=db_session)
        assert request['state'] == target_state

    finally:
        config_remove_option('conveyor', 'use_preparer')
        db_session.query(models.Source).delete()
        db_session.query(models.Request).delete()
        db_session.query(models.RSETransferLimit).delete()
        db_session.query(models.Distance).delete()
        db_session.commit()
        reset_config_table()
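
The use_preparer strings suggest the test is parametrized; a minimal sketch of that assumed setup (the marker and its values are my assumption, not shown in the snippet):

import pytest

# Assumed parametrization: use_preparer arrives as a human-readable string.
@pytest.mark.parametrize('use_preparer', ['preparer enabled', 'preparer disabled'])
def test_preparer_flag(use_preparer):
    assert use_preparer in ('preparer enabled', 'preparer disabled')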
Code example #9
def __schedule_requests():
    """
    Schedule requests
    """
    try:
        logging.info("Throttler retrieve requests statistics")
        results = get_stats_by_activity_dest_state(state=[
            RequestState.QUEUED, RequestState.SUBMITTING,
            RequestState.SUBMITTED, RequestState.WAITING
        ])
        result_dict = {}
        for activity, dest_rse_id, account, state, rse, counter in results:
            threshold = get_config_limit(activity, dest_rse_id)

            if threshold or (counter and (state == RequestState.WAITING)):
                if activity not in result_dict:
                    result_dict[activity] = {}
                if dest_rse_id not in result_dict[activity]:
                    result_dict[activity][dest_rse_id] = {
                        'waiting': 0,
                        'transfer': 0,
                        'threshold': threshold,
                        'accounts': {},
                        'rse': rse
                    }
                if account not in result_dict[activity][dest_rse_id][
                        'accounts']:
                    result_dict[activity][dest_rse_id]['accounts'][account] = {
                        'waiting': 0,
                        'transfer': 0
                    }
                if state == RequestState.WAITING:
                    result_dict[activity][dest_rse_id]['accounts'][account][
                        'waiting'] += counter
                    result_dict[activity][dest_rse_id]['waiting'] += counter
                else:
                    result_dict[activity][dest_rse_id]['accounts'][account][
                        'transfer'] += counter
                    result_dict[activity][dest_rse_id]['transfer'] += counter

        for activity in result_dict:
            for dest_rse_id in result_dict[activity]:
                threshold = result_dict[activity][dest_rse_id]['threshold']
                transfer = result_dict[activity][dest_rse_id]['transfer']
                waiting = result_dict[activity][dest_rse_id]['waiting']
                rse_name = result_dict[activity][dest_rse_id]['rse']
                if waiting:
                    logging.debug("Request status for %s at %s: %s" %
                                  (activity, rse_name,
                                   result_dict[activity][dest_rse_id]))

                if threshold is None:
                    logging.debug(
                        "Throttler remove limits(threshold: %s) and release all waiting requests for activity %s, rse_id %s"
                        % (threshold, activity, dest_rse_id))
                    delete_rse_transfer_limits(rse=None,
                                               activity=activity,
                                               rse_id=dest_rse_id)
                    release_waiting_requests(rse=None,
                                             activity=activity,
                                             rse_id=dest_rse_id)
                    record_counter(
                        'daemons.conveyor.throttler.delete_rse_transfer_limits.%s.%s'
                        % (activity, rse_name))

                elif transfer + waiting > threshold:
                    logging.debug(
                        "Throttler set limits for activity %s, rse %s" %
                        (activity, rse_name))
                    set_rse_transfer_limits(rse=None,
                                            activity=activity,
                                            rse_id=dest_rse_id,
                                            max_transfers=threshold,
                                            transfers=transfer,
                                            waitings=waiting)
                    record_gauge(
                        'daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.max_transfers'
                        % (activity, rse_name), threshold)
                    record_gauge(
                        'daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.transfers'
                        % (activity, rse_name), transfer)
                    record_gauge(
                        'daemons.conveyor.throttler.set_rse_transfer_limits.%s.%s.waitings'
                        % (activity, rse_name), waiting)
                    if transfer < 0.8 * threshold:
                        # release requests on account
                        nr_accounts = len(
                            result_dict[activity][dest_rse_id]['accounts'])
                        if nr_accounts < 1:
                            nr_accounts = 1
                        to_release = threshold - transfer
                        threshold_per_account = math.ceil(threshold /
                                                          nr_accounts)
                        to_release_per_account = math.ceil(to_release /
                                                           nr_accounts)
                        accounts = result_dict[activity][dest_rse_id][
                            'accounts']
                        for account in accounts:
                            if nr_accounts == 1:
                                logging.debug(
                                    "Throttler release %s waiting requests for activity %s, rse %s, account %s "
                                    %
                                    (to_release, activity, rse_name, account))
                                release_waiting_requests(rse=None,
                                                         activity=activity,
                                                         rse_id=dest_rse_id,
                                                         account=account,
                                                         count=to_release)
                                record_gauge(
                                    'daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s'
                                    % (activity, rse_name, account),
                                    to_release)

                            elif accounts[account][
                                    'transfer'] > threshold_per_account:
                                logging.debug(
                                    "Throttler will not release  %s waiting requests for activity %s, rse %s, account %s: It queued more transfers than its share "
                                    % (accounts[account]['waiting'], activity,
                                       rse_name, account))
                                nr_accounts -= 1
                                to_release_per_account = math.ceil(to_release /
                                                                   nr_accounts)
                            elif accounts[account][
                                    'waiting'] < to_release_per_account:
                                logging.debug(
                                    "Throttler release %s waiting requests for activity %s, rse %s, account %s "
                                    % (accounts[account]['waiting'], activity,
                                       rse_name, account))
                                release_waiting_requests(
                                    rse=None,
                                    activity=activity,
                                    rse_id=dest_rse_id,
                                    account=account,
                                    count=accounts[account]['waiting'])
                                record_gauge(
                                    'daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s'
                                    % (activity, rse_name, account),
                                    accounts[account]['waiting'])

                                to_release = to_release - accounts[account][
                                    'waiting']
                                nr_accounts -= 1
                                to_release_per_account = math.ceil(to_release /
                                                                   nr_accounts)
                            else:
                                logging.debug(
                                    "Throttler release %s waiting requests for activity %s, rse %s, account %s "
                                    % (to_release_per_account, activity,
                                       rse_name, account))
                                release_waiting_requests(
                                    rse=None,
                                    activity=activity,
                                    rse_id=dest_rse_id,
                                    account=account,
                                    count=to_release_per_account)
                                record_gauge(
                                    'daemons.conveyor.throttler.release_waiting_requests.%s.%s.%s'
                                    % (activity, rse_name, account),
                                    to_release_per_account)

                                to_release = to_release - to_release_per_account
                                nr_accounts -= 1
                    else:
                        logging.debug(
                            "Throttler has done nothing for activity %s on rse %s (transfer > 0.8 * threshold)"
                            % (activity, rse_name))

                elif waiting > 0:
                    logging.debug(
                        "Throttler remove limits(threshold: %s) and release all waiting requests for activity %s, rse %s"
                        % (threshold, activity, rse_name))
                    delete_rse_transfer_limits(rse=None,
                                               activity=activity,
                                               rse_id=dest_rse_id)
                    release_waiting_requests(rse=None,
                                             activity=activity,
                                             rse_id=dest_rse_id)
                    record_counter(
                        'daemons.conveyor.throttler.delete_rse_transfer_limits.%s.%s'
                        % (activity, rse_name))
    except Exception:
        logging.critical("Failed to schedule requests, error: %s" %
                         (traceback.format_exc()))
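
The aggregation step at the top of __schedule_requests can be read in isolation; below is a self-contained sketch (plain tuples and strings, no Rucio calls, and with the threshold/waiting filter omitted for brevity) of how the raw rows are folded into the nested result_dict.

def aggregate(rows, thresholds):
    """rows: (activity, dest_rse_id, account, state, counter) tuples.
    thresholds: {(activity, dest_rse_id): threshold or None}. Sketch only."""
    result = {}
    for activity, rse_id, account, state, counter in rows:
        entry = result.setdefault(activity, {}).setdefault(rse_id, {
            'waiting': 0, 'transfer': 0,
            'threshold': thresholds.get((activity, rse_id)),
            'accounts': {}})
        acct = entry['accounts'].setdefault(account, {'waiting': 0, 'transfer': 0})
        key = 'waiting' if state == 'WAITING' else 'transfer'
        acct[key] += counter
        entry[key] += counter
    return result

rows = [('User Subscription', 'rse-1', 'root', 'WAITING', 3),
        ('User Subscription', 'rse-1', 'root', 'SUBMITTED', 1)]
print(aggregate(rows, {('User Subscription', 'rse-1'): 5}))
# {'User Subscription': {'rse-1': {'waiting': 3, 'transfer': 1, 'threshold': 5,
#                                  'accounts': {'root': {'waiting': 3, 'transfer': 1}}}}}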
Code example #10
    def test_release_waiting_requests_per_free_volume(self):
        """ REQUEST (CORE): release waiting requests that fit grouped in available volume."""
        if self.dialect == 'mysql':
            return True

        # release unattached requests that fit in available volume with respect to already submitted transfers
        name1 = generate_uuid()
        add_replica(self.source_rse_id,
                    self.scope,
                    name1,
                    1,
                    self.account,
                    session=self.db_session)
        name2 = generate_uuid()
        add_replica(self.source_rse_id,
                    self.scope,
                    name2,
                    1,
                    self.account,
                    session=self.db_session)
        name3 = generate_uuid()
        add_replica(self.source_rse_id,
                    self.scope,
                    name3,
                    1,
                    self.account,
                    session=self.db_session)
        request = models.Request(dest_rse_id=self.dest_rse_id,
                                 bytes=2,
                                 activity=self.all_activities,
                                 state=constants.RequestState.SUBMITTED)
        request.save(session=self.db_session)
        volume = 10
        set_rse_transfer_limits(self.dest_rse_id,
                                'all_activities',
                                volume=volume,
                                max_transfers=1,
                                session=self.db_session)
        requests = [{
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name1,
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'retry_count': 1,
            'requested_at': datetime.now().replace(year=2015),
            'attributes': {
                'activity': 'User Subscription',
                'bytes': 8,
                'md5': '',
                'adler32': ''
            }
        }, {
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name2,
            'requested_at': datetime.now().replace(year=2020),
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'retry_count': 1,
            'attributes': {
                'activity': 'User Subscription',
                'bytes': 2,
                'md5': '',
                'adler32': ''
            }
        }, {
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name3,
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'retry_count': 1,
            'requested_at': datetime.now().replace(year=2000),
            'attributes': {
                'activity': 'User Subscription',
                'bytes': 10,
                'md5': '',
                'adler32': ''
            }
        }]
        queue_requests(requests, session=self.db_session)
        release_waiting_requests_per_free_volume(self.dest_rse_id,
                                                 volume=volume,
                                                 session=self.db_session)
        # released because small enough
        request = get_request_by_did(self.scope,
                                     name1,
                                     self.dest_rse_id,
                                     session=self.db_session)
        assert_equal(request['state'], constants.RequestState.QUEUED)
        # still waiting because it was requested later and is too big
        request = get_request_by_did(self.scope,
                                     name2,
                                     self.dest_rse_id,
                                     session=self.db_session)
        assert_equal(request['state'], constants.RequestState.WAITING)
        # still waiting because too big
        request = get_request_by_did(self.scope,
                                     name3,
                                     self.dest_rse_id,
                                     session=self.db_session)
        assert_equal(request['state'], constants.RequestState.WAITING)

        # release attached requests that fit together with the dataset in available volume with respect to already submitted transfers
        self.db_session.query(models.Request).delete()
        self.db_session.commit()
        name1 = generate_uuid()
        add_replica(self.source_rse_id,
                    self.scope,
                    name1,
                    1,
                    self.account,
                    session=self.db_session)
        name2 = generate_uuid()
        add_replica(self.source_rse_id,
                    self.scope,
                    name2,
                    1,
                    self.account,
                    session=self.db_session)
        name3 = generate_uuid()
        add_replica(self.source_rse_id,
                    self.scope,
                    name3,
                    1,
                    self.account,
                    session=self.db_session)
        name4 = generate_uuid()
        add_replica(self.source_rse_id,
                    self.scope,
                    name4,
                    1,
                    self.account,
                    session=self.db_session)
        dataset1_name = generate_uuid()
        add_did(self.scope,
                dataset1_name,
                constants.DIDType.DATASET,
                self.account,
                session=self.db_session)
        attach_dids(self.scope,
                    dataset1_name, [{
                        'name': name1,
                        'scope': self.scope
                    }, {
                        'name': name4,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)
        dataset2_name = generate_uuid()
        add_did(self.scope,
                dataset2_name,
                constants.DIDType.DATASET,
                self.account,
                session=self.db_session)
        attach_dids(self.scope,
                    dataset2_name, [{
                        'name': name2,
                        'scope': self.scope
                    }, {
                        'name': name3,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)
        request = models.Request(dest_rse_id=self.dest_rse_id,
                                 bytes=2,
                                 activity=self.all_activities,
                                 state=constants.RequestState.SUBMITTED)
        request.save(session=self.db_session)
        volume = 10
        set_rse_transfer_limits(self.dest_rse_id,
                                'all_activities',
                                volume=volume,
                                max_transfers=1,
                                session=self.db_session)
        requests = [{
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name1,
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'retry_count': 1,
            'requested_at': datetime.now().replace(year=2015),
            'attributes': {
                'activity': 'User Subscription',
                'bytes': 6,
                'md5': '',
                'adler32': ''
            }
        }, {
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name2,
            'requested_at': datetime.now().replace(year=2020),
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'retry_count': 1,
            'attributes': {
                'activity': 'User Subscription',
                'bytes': 2,
                'md5': '',
                'adler32': ''
            }
        }, {
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name3,
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'retry_count': 1,
            'requested_at': datetime.now().replace(year=2000),
            'attributes': {
                'activity': 'User Subscription',
                'bytes': 10,
                'md5': '',
                'adler32': ''
            }
        }, {
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name4,
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'retry_count': 1,
            'requested_at': datetime.now().replace(year=2030),
            'attributes': {
                'activity': 'User Subscription',
                'bytes': 2,
                'md5': '',
                'adler32': ''
            }
        }]
        queue_requests(requests, session=self.db_session)
        release_waiting_requests_per_free_volume(self.dest_rse_id,
                                                 volume=volume,
                                                 session=self.db_session)
        # released because dataset fits in volume
        request = get_request_by_did(self.scope,
                                     name1,
                                     self.dest_rse_id,
                                     session=self.db_session)
        assert_equal(request['state'], constants.RequestState.QUEUED)
        request = get_request_by_did(self.scope,
                                     name4,
                                     self.dest_rse_id,
                                     session=self.db_session)
        assert_equal(request['state'], constants.RequestState.QUEUED)
        # waiting because dataset is too big
        request = get_request_by_did(self.scope,
                                     name2,
                                     self.dest_rse_id,
                                     session=self.db_session)
        assert_equal(request['state'], constants.RequestState.WAITING)
        request = get_request_by_did(self.scope,
                                     name3,
                                     self.dest_rse_id,
                                     session=self.db_session)
        assert_equal(request['state'], constants.RequestState.WAITING)

        # release requests with no available volume -> release nothing
        self.db_session.query(models.Request).delete()
        self.db_session.commit()
        name1 = generate_uuid()
        add_replica(self.dest_rse_id,
                    self.scope,
                    name1,
                    1,
                    self.account,
                    session=self.db_session)
        volume = 0
        set_rse_transfer_limits(self.dest_rse_id,
                                'all_activities',
                                volume=volume,
                                max_transfers=1,
                                session=self.db_session)
        requests = [{
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name1,
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'retry_count': 1,
            'requested_at': datetime.now().replace(year=2015),
            'attributes': {
                'activity': 'User Subscription',
                'bytes': 8,
                'md5': '',
                'adler32': ''
            }
        }]
        queue_requests(requests, session=self.db_session)
        release_waiting_requests_per_free_volume(self.dest_rse_id,
                                                 volume=volume,
                                                 session=self.db_session)
        # waiting because no available volume
        request = get_request_by_did(self.scope,
                                     name1,
                                     self.dest_rse_id,
                                     session=self.db_session)
        assert_equal(request['state'], constants.RequestState.WAITING)
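
A compact self-contained sketch of the free-volume bookkeeping the first part of this test exercises (illustrative only, reproducing the asserted outcome rather than the Rucio internals): the free volume is the configured volume minus what submitted transfers already occupy, and waiting requests are released oldest-first while they still fit.

from datetime import datetime

def release_per_free_volume_sketch(waiting, volume, submitted_bytes):
    free = volume - submitted_bytes
    released, used = [], 0
    for req in sorted(waiting, key=lambda r: r['requested_at']):
        if used + req['bytes'] <= free:
            released.append(req['name'])
            used += req['bytes']
    return released

reqs = [{'name': 'name1', 'bytes': 8,  'requested_at': datetime(2015, 1, 1)},
        {'name': 'name2', 'bytes': 2,  'requested_at': datetime(2020, 1, 1)},
        {'name': 'name3', 'bytes': 10, 'requested_at': datetime(2000, 1, 1)}]
# volume 10, one submitted transfer of 2 bytes -> only name1 fits, as asserted above.
print(release_per_free_volume_sketch(reqs, volume=10, submitted_bytes=2))  # ['name1']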
Code example #11
    def test_release_waiting_requests_grouped_fifo(self):
        """ REQUEST (CORE): release waiting requests based on grouped FIFO. """
        if self.dialect == 'mysql':
            return True

        # set max_volume to 0 to check first without releasing extra requests
        set_rse_transfer_limits(self.dest_rse_id,
                                self.all_activities,
                                volume=0,
                                max_transfers=1,
                                session=self.db_session)

        # one request with an unattached DID -> one request should be released
        self.db_session.query(models.Request).delete()
        self.db_session.commit()
        name = generate_uuid()
        add_replica(self.source_rse_id,
                    self.scope,
                    name,
                    1,
                    self.account,
                    session=self.db_session)
        requests = [{
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name,
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'retry_count': 1,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }]
        queue_requests(requests, session=self.db_session)
        release_waiting_requests_grouped_fifo(self.dest_rse_id,
                                              count=1,
                                              session=self.db_session)
        request = get_request_by_did(self.scope,
                                     name,
                                     self.dest_rse_id,
                                     session=self.db_session)
        assert_equal(request['state'], constants.RequestState.QUEUED)

        # one request with an attached DID -> one request should be released
        self.db_session.query(models.Request).delete()
        self.db_session.commit()
        name = generate_uuid()
        dataset_name = generate_uuid()
        add_replica(self.source_rse_id,
                    self.scope,
                    name,
                    1,
                    self.account,
                    session=self.db_session)
        add_did(self.scope,
                dataset_name,
                constants.DIDType.DATASET,
                self.account,
                session=self.db_session)
        attach_dids(self.scope,
                    dataset_name, [{
                        'name': name,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)
        requests = [{
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name,
            'rule_id': generate_uuid(),
            'retry_count': 1,
            'scope': self.scope,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }]
        queue_requests(requests, session=self.db_session)
        release_waiting_requests_grouped_fifo(self.dest_rse_id,
                                              count=1,
                                              session=self.db_session)
        request = get_request_by_did(self.scope,
                                     name,
                                     self.dest_rse_id,
                                     session=self.db_session)
        assert_equal(request['state'], constants.RequestState.QUEUED)

        # five requests with different requested_at and multiple attachments per collection -> release only one request -> two requests of one collection should be released
        self.db_session.query(models.Request).delete()
        self.db_session.commit()
        name1 = generate_uuid()
        name2 = generate_uuid()
        name3 = generate_uuid()
        name4 = generate_uuid()
        name5 = generate_uuid()
        dataset_1_name = generate_uuid()
        add_did(self.scope,
                dataset_1_name,
                constants.DIDType.DATASET,
                self.account,
                session=self.db_session)
        dataset_2_name = generate_uuid()
        add_did(self.scope,
                dataset_2_name,
                constants.DIDType.DATASET,
                self.account,
                session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name1,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name2,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name3,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name4,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name5,
                    1,
                    self.account,
                    session=self.db_session)
        attach_dids(self.scope,
                    dataset_1_name, [{
                        'name': name1,
                        'scope': self.scope
                    }, {
                        'name': name2,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)
        attach_dids(self.scope,
                    dataset_2_name, [{
                        'name': name3,
                        'scope': self.scope
                    }, {
                        'name': name4,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)

        requests = [{
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name1,
            'scope': self.scope,
            'retry_count': 1,
            'rule_id': generate_uuid(),
            'requested_at': datetime.now().replace(year=2000),
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }, {
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name2,
            'requested_at': datetime.now().replace(year=2020),
            'rule_id': generate_uuid(),
            'scope': self.scope,
            'retry_count': 1,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }, {
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name3,
            'requested_at': datetime.now().replace(year=2015),
            'retry_count': 1,
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }, {
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name4,
            'requested_at': datetime.now().replace(year=2010),
            'retry_count': 1,
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }, {
            'source_rse_id': self.source_rse_id,
            'dest_rse_id': self.dest_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name5,
            'retry_count': 1,
            'requested_at': datetime.now().replace(year=2018),
            'scope': self.scope,
            'rule_id': generate_uuid(),
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }]
        queue_requests(requests, session=self.db_session)
        release_waiting_requests_grouped_fifo(self.dest_rse_id,
                                              count=1,
                                              session=self.db_session)
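        # released because its dataset contains the oldest waiting request (year 2000)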
        request_1 = get_request_by_did(self.scope,
                                       name1,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_1['state'], constants.RequestState.QUEUED)
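        # released as well because it is attached to the same dataset as name1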
        request_2 = get_request_by_did(self.scope,
                                       name2,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_2['state'], constants.RequestState.QUEUED)
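        # name3 and name4 stay waiting: the oldest request in their dataset (2010) is newer than the one in dataset_1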
        request_3 = get_request_by_did(self.scope,
                                       name3,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_3['state'], constants.RequestState.WAITING)
        request_4 = get_request_by_did(self.scope,
                                       name4,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_4['state'], constants.RequestState.WAITING)
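        # name5 stays waiting: it is not attached to a dataset and the release count is already exhausted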
        request_5 = get_request_by_did(self.scope,
                                       name5,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_5['state'], constants.RequestState.WAITING)

        # with maximal volume check -> release one request -> three requests should be released because of attachments and free volume space
        self.db_session.query(models.Request).delete()
        self.db_session.commit()
        name1 = generate_uuid()
        name2 = generate_uuid()
        name3 = generate_uuid()
        dataset_1_name = generate_uuid()
        add_did(self.scope,
                dataset_1_name,
                constants.DIDType.DATASET,
                self.account,
                session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name1,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name2,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name3,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name4,
                    1,
                    self.account,
                    session=self.db_session)
        attach_dids(self.scope,
                    dataset_1_name, [{
                        'name': name1,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)
        attach_dids(self.scope,
                    dataset_1_name, [{
                        'name': name2,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)
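        # allow at most 1 active transfer and 10 bytes of active volume towards the destination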
        set_rse_transfer_limits(self.dest_rse_id,
                                self.all_activities,
                                volume=10,
                                max_transfers=1,
                                session=self.db_session)
        requests = [
            {
                'source_rse_id': self.source_rse_id,
                'dest_rse_id': self.dest_rse_id,
                'request_type': constants.RequestType.TRANSFER,
                'request_id': generate_uuid(),
                'name': name1,
                'bytes': 1,
                'scope': self.scope,
                'retry_count': 1,
                'rule_id': generate_uuid(),
                'requested_at': datetime.now().replace(year=2000),
                'attributes': {
                    'activity': self.user_activity,
                    'bytes': 1,
                    'md5': '',
                    'adler32': ''
                }
            },
            {
                'source_rse_id': self.source_rse_id,
                'dest_rse_id': self.dest_rse_id,
                'request_type': constants.RequestType.TRANSFER,
                'request_id': generate_uuid(),
                'name': name2,
                'bytes': 2,
                'requested_at': datetime.now().replace(year=2020),
                'rule_id': generate_uuid(),
                'scope': self.scope,
                'retry_count': 1,
                'attributes': {
                    'activity': self.user_activity,
                    'bytes': 2,
                    'md5': '',
                    'adler32': ''
                }
            },
            {
                'source_rse_id': self.source_rse_id,
                'dest_rse_id': self.dest_rse_id,
                'request_type': constants.RequestType.TRANSFER,
                'request_id': generate_uuid(),
                'name': name3,
                'bytes': 3,
                'requested_at': datetime.now().replace(
                    year=2021
                ),  # requested after the request below but small enough for max_volume check
                'rule_id': generate_uuid(),
                'scope': self.scope,
                'retry_count': 1,
                'attributes': {
                    'activity': self.user_activity,
                    'bytes': 3,
                    'md5': '',
                    'adler32': ''
                }
            },
            {
                'source_rse_id': self.source_rse_id,
                'dest_rse_id': self.dest_rse_id,
                'request_type': constants.RequestType.TRANSFER,
                'request_id': generate_uuid(),
                'name': name4,
                'bytes': 3000,
                'requested_at': datetime.now().replace(year=2020),
                'rule_id': generate_uuid(),
                'scope': self.scope,
                'retry_count': 1,
                'attributes': {
                    'activity': self.user_activity,
                    'bytes': 3000,
                    'md5': '',
                    'adler32': ''
                }
            }
        ]

        queue_requests(requests, session=self.db_session)
        amount_updated_requests = release_waiting_requests_grouped_fifo(
            self.dest_rse_id, count=1, session=self.db_session)
        assert_equal(amount_updated_requests, 3)
        # released because it got requested first
        request_1 = get_request_by_did(self.scope,
                                       name1,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_1['state'], constants.RequestState.QUEUED)
        # released because the DID is attached to the same dataset
        request_2 = get_request_by_did(self.scope,
                                       name2,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_2['state'], constants.RequestState.QUEUED)
        # released because of available volume
        request_3 = get_request_by_did(self.scope,
                                       name3,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_3['state'], constants.RequestState.QUEUED)
        # still waiting because there is no free volume
        request_4 = get_request_by_did(self.scope,
                                       name4,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_4['state'], constants.RequestState.WAITING)

        # with maximal volume check -> release one request -> two requests should be released because of attachments
        self.db_session.query(models.Request).delete()
        self.db_session.commit()
        name1 = generate_uuid()
        name2 = generate_uuid()
        name3 = generate_uuid()
        name4 = generate_uuid()
        dataset_1_name = generate_uuid()
        add_did(self.scope,
                dataset_1_name,
                constants.DIDType.DATASET,
                self.account,
                session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name1,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name2,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name3,
                    1,
                    self.account,
                    session=self.db_session)
        add_replica(self.source_rse_id,
                    self.scope,
                    name4,
                    1,
                    self.account,
                    session=self.db_session)
        attach_dids(self.scope,
                    dataset_1_name, [{
                        'name': name1,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)
        attach_dids(self.scope,
                    dataset_1_name, [{
                        'name': name2,
                        'scope': self.scope
                    }],
                    self.account,
                    session=self.db_session)
        set_rse_transfer_limits(self.dest_rse_id,
                                self.all_activities,
                                volume=5,
                                max_transfers=1,
                                session=self.db_session)
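        # a transfer that is already SUBMITTED occupies 2 of the 5 bytes of volume, leaving 3 bytes for waiting requests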
        request = models.Request(dest_rse_id=self.dest_rse_id,
                                 bytes=2,
                                 activity=self.all_activities,
                                 state=constants.RequestState.SUBMITTED)
        request.save(session=self.db_session)
        requests = [{
            'dest_rse_id': self.dest_rse_id,
            'source_rse_id': self.source_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name1,
            'bytes': 1,
            'scope': self.scope,
            'retry_count': 1,
            'rule_id': generate_uuid(),
            'requested_at': datetime.now().replace(year=2000),
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }, {
            'dest_rse_id': self.dest_rse_id,
            'source_rse_id': self.source_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name2,
            'bytes': 2,
            'requested_at': datetime.now().replace(year=2020),
            'rule_id': generate_uuid(),
            'scope': self.scope,
            'retry_count': 1,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 2,
                'md5': '',
                'adler32': ''
            }
        }, {
            'dest_rse_id': self.dest_rse_id,
            'source_rse_id': self.source_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name3,
            'bytes': 1,
            'requested_at': datetime.now().replace(year=2020),
            'rule_id': generate_uuid(),
            'scope': self.scope,
            'retry_count': 1,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }, {
            'dest_rse_id': self.dest_rse_id,
            'source_rse_id': self.source_rse_id,
            'request_type': constants.RequestType.TRANSFER,
            'request_id': generate_uuid(),
            'name': name4,
            'bytes': 1,
            'requested_at': datetime.now().replace(year=2020),
            'rule_id': generate_uuid(),
            'scope': self.scope,
            'retry_count': 1,
            'attributes': {
                'activity': self.user_activity,
                'bytes': 1,
                'md5': '',
                'adler32': ''
            }
        }]

        queue_requests(requests, session=self.db_session)
        release_waiting_requests_grouped_fifo(self.dest_rse_id,
                                              count=1,
                                              session=self.db_session)
        # released because it got requested first
        request_1 = get_request_by_did(self.scope,
                                       name1,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_1['state'], constants.RequestState.QUEUED)
        # released because the DID is attached to the same dataset
        request_2 = get_request_by_did(self.scope,
                                       name2,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_2['state'], constants.RequestState.QUEUED)
        # still waiting because there is no free volume after releasing the two requests above
        request_3 = get_request_by_did(self.scope,
                                       name3,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_3['state'], constants.RequestState.WAITING)
        request_4 = get_request_by_did(self.scope,
                                       name4,
                                       self.dest_rse_id,
                                       session=self.db_session)
        assert_equal(request_4['state'], constants.RequestState.WAITING)
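
The assertions above encode the grouped-FIFO release policy: whole datasets are released in the order of their oldest request until the requested count is reached, and, when a volume limit is configured, further waiting requests are released only while they still fit into the remaining volume. The snippet below is a minimal stand-alone sketch of that policy for illustration only; it is not Rucio's implementation (which does this in SQL inside release_waiting_requests_grouped_fifo), and all names in it are invented.

# Minimal sketch of the grouped-FIFO release policy exercised above (illustration only).
from collections import defaultdict
from datetime import datetime


def grouped_fifo_release(waiting, count, volume=None, used_volume=0):
    """waiting: dicts with 'name', 'dataset' (or None), 'bytes', 'requested_at'."""
    groups = defaultdict(list)
    for req in waiting:
        # standalone files form their own single-element group
        groups[req['dataset'] or req['name']].append(req)
    # datasets are released as a whole, the one holding the oldest request first
    ordered = sorted(groups.values(),
                     key=lambda reqs: min(r['requested_at'] for r in reqs))
    budget = None if volume is None else volume - used_volume
    released = []
    for reqs in ordered:
        group_bytes = sum(r['bytes'] for r in reqs)
        fits = budget is None or group_bytes <= budget
        if fits and len(released) < count:
            released.extend(reqs)   # released to satisfy the FIFO count
        elif fits and budget is not None:
            released.extend(reqs)   # released on top of the count: free volume remains
        else:
            continue                # stays WAITING
        if budget is not None:
            budget -= group_bytes
    return [r['name'] for r in released]


# Numbers taken from the "maximal volume check" block above (count=1, volume=10):
waiting = [
    {'name': 'name1', 'dataset': 'dataset_1', 'bytes': 1, 'requested_at': datetime(2000, 1, 1)},
    {'name': 'name2', 'dataset': 'dataset_1', 'bytes': 2, 'requested_at': datetime(2020, 1, 1)},
    {'name': 'name3', 'dataset': None, 'bytes': 3, 'requested_at': datetime(2021, 1, 1)},
    {'name': 'name4', 'dataset': None, 'bytes': 3000, 'requested_at': datetime(2020, 1, 1)},
]
print(grouped_fifo_release(waiting, count=1, volume=10))  # ['name1', 'name2', 'name3']
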
Code example #12
    def setup(self):
        # New RSE
        self.new_rse = rse_name_generator()

        # RSE 1 that already exists
        self.old_rse_1 = rse_name_generator()
        add_rse(self.old_rse_1, availability=1)
        add_protocol(
            self.old_rse_1, {
                'scheme': 'scheme1',
                'hostname': 'hostname1',
                'port': 1000,
                'impl': 'impl'
            })
        self.old_rse_id_1 = get_rse_id(self.old_rse_1)
        set_rse_limits(rse=self.old_rse_1, name='limit1', value='10')
        set_rse_transfer_limits(rse=self.old_rse_1,
                                activity='activity1',
                                max_transfers=10)
        add_rse_attribute(rse=self.old_rse_1, key='attr1', value='test10')

        # RSE 2 that already exists
        self.old_rse_2 = rse_name_generator()
        add_rse(self.old_rse_2)
        self.old_rse_id_2 = get_rse_id(self.old_rse_2)

        # RSE 3 that already exists
        self.old_rse_3 = rse_name_generator()
        add_rse(self.old_rse_3)
        self.old_rse_id_3 = get_rse_id(self.old_rse_3)

        # Distance that already exists
        add_distance(self.old_rse_id_1, self.old_rse_id_2)

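        # full import payload: a brand-new RSE plus updated protocols, limits,
        # transfer limits and attributes for old_rse_1, and distances from old_rse_1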
        self.data1 = {
            'rses': [{
                'rse': self.new_rse,
                'rse_type': 'TAPE',
                'availability': 5,
                'city': 'NewCity',
                'protocols': {
                    'protocols': [{
                        'scheme': 'scheme',
                        'hostname': 'hostname',
                        'port': 1000,
                        'impl': 'impl'
                    }]
                },
                'limits': {
                    'limit1': 0
                },
                'transfer_limits': {
                    'activity1': {
                        'unknown_rse_id': {
                            'max_transfers': 1
                        }
                    }
                },
                'attributes': {
                    'attr1': 'test'
                }
            }, {
                'rse': self.old_rse_1,
                'protocols': {
                    'protocols': [{
                        'scheme': 'scheme1',
                        'hostname': 'hostname1',
                        'port': 1000,
                        'prefix': 'prefix',
                        'impl': 'impl1'
                    }, {
                        'scheme': 'scheme2',
                        'hostname': 'hostname2',
                        'port': 1001,
                        'impl': 'impl'
                    }]
                },
                'limits': {
                    'limit1': 0,
                    'limit2': 2
                },
                'transfer_limits': {
                    'activity1': {
                        self.old_rse_id_1: {
                            'max_transfers': 1
                        }
                    },
                    'activity2': {
                        self.old_rse_id_1: {
                            'max_transfers': 2
                        }
                    }
                },
                'attributes': {
                    'attr1': 'test1',
                    'attr2': 'test2'
                }
            }],
            'distances': {
                self.old_rse_1: {
                    self.old_rse_2: {
                        'src_rse_id': self.old_rse_id_1,
                        'dest_rse_id': self.old_rse_id_2,
                        'ranking': 10
                    },
                    self.old_rse_3: {
                        'src_rse_id': self.old_rse_id_1,
                        'dest_rse_id': self.old_rse_id_3,
                        'ranking': 4
                    }
                }
            }
        }
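        # minimal payload: only the name of the new RSE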
        self.data2 = {'rses': [{'rse': self.new_rse}]}
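        # payload with an empty 'distances' section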
        self.data3 = {'distances': {}}
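
These three payloads are fixtures for Rucio's importer tests. How they are consumed is not shown in this snippet; presumably the corresponding tests hand them to the importer, roughly as sketched below. The entry point rucio.core.importer.import_data and the test method are assumptions here, not part of the code above; only the payload descriptions in the comments come from the setup() shown.

    # Hedged usage sketch -- import_data is assumed to be the consumer of these payloads.
    # from rucio.core.importer import import_data
    def test_import(self):
        import_data(data=self.data1)  # full payload: new RSE, updates to old_rse_1, distances
        import_data(data=self.data2)  # minimal payload: just an RSE name
        import_data(data=self.data3)  # payload with an empty 'distances' section
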