def test_lifetime_truncate_expiration(root_account, rse_factory, mock_scope, did_factory, rucio_client):
    """ Test that the duration of a lifetime exception is truncated if max_extension is defined """
    nb_datasets = 2
    today = datetime.now()
    yesterday = today - timedelta(days=1)
    tomorrow = today + timedelta(days=1)
    next_year = today + timedelta(days=365)
    rse, rse_id = rse_factory.make_posix_rse()
    datasets = [did_factory.make_dataset() for _ in range(nb_datasets)]
    metadata = str(uuid())
    for dataset in datasets:
        set_metadata(dataset['scope'], dataset['name'], 'datatype', metadata)
        set_metadata(dataset['scope'], dataset['name'], 'eol_at', yesterday)
    client_datasets = list()
    for dataset in datasets:
        client_datasets.append({'scope': dataset['scope'].external, 'name': dataset['name'], 'did_type': 'DATASET'})
    tomorrow = tomorrow.strftime('%Y-%m-%d')
    config_core.set(section='lifetime_model', option='cutoff_date', value=tomorrow)
    config_core.get(section='lifetime_model', option='cutoff_date', default=None, use_cache=False)
    config_core.set(section='lifetime_model', option='max_extension', value=30)
    config_core.get(section='lifetime_model', option='max_extension', default=None, use_cache=False)
    result = rucio_client.add_exception(client_datasets, account='root', pattern='wekhewfk', comments='This is a comment', expires_at=next_year)
    exception_id = list(result['exceptions'].keys())[0]
    exception = [exception for exception in rucio_client.list_exceptions() if exception['id'] == exception_id][0]
    # The requested one-year extension must have been truncated to max_extension (30 days).
    assert exception['expires_at'] - exception['created_at'] < timedelta(days=31)
    assert exception['expires_at'] - exception['created_at'] > timedelta(days=29)
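# A minimal sketch of the truncation behaviour the test above asserts; the
# helper name and the direct use of max_extension are illustrative
# assumptions, not the server implementation.
def _truncated_expiration(created_at, requested_expires_at, max_extension_days):
    """ Cap a requested expiration date at created_at + max_extension days. """
    cap = created_at + timedelta(days=max_extension_days)
    return min(requested_expires_at, cap)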
def reset_config_table():
    """ Clear the config table and install any default entries needed for the tests. """
    db_session = session.get_session()
    db_session.query(models.Config).delete()
    db_session.commit()
    config_db.set("vo-map", "testvo1", "tst")
    config_db.set("vo-map", "testvo2", "ts2")
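# A possible wiring of the helper above as a pytest fixture, so that every
# test in the module starts from a known config table; the fixture name and
# the autouse choice are illustrative assumptions.
import pytest


@pytest.fixture(autouse=True)
def _clean_config_table():
    reset_config_table()
    yield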
def test_get_config_sections(self):
    """ CONFIG (CORE): Retrieve configuration sections only """
    expected_sections = [str(generate_uuid()), str(generate_uuid())]
    for section in expected_sections:
        config_core.set(section, str(generate_uuid()), str(generate_uuid()))
    sections = config_core.sections(use_cache=False)
    for section in expected_sections:
        assert section in sections
def test_throttler_fifo_release_nothing(self):
    """ THROTTLER (CLIENTS): throttler releases nothing (fifo). """
    # Two waiting requests and one active request, but the threshold is 1:
    # more than 80% of the transfer limit is already used -> release nothing.
    set('throttler', '%s,%s' % (self.user_activity, self.dest_rse), 1, session=self.db_session)
    request = models.Request(dest_rse_id=self.dest_rse_id, bytes=2, activity=self.user_activity, state=constants.RequestState.SUBMITTED)
    request.save(session=self.db_session)
    name1 = generate_uuid()
    name2 = generate_uuid()
    add_replica(self.source_rse_id, self.scope, name1, 1, self.account, session=self.db_session)
    add_replica(self.source_rse_id, self.scope, name2, 1, self.account, session=self.db_session)
    requests = [{
        'dest_rse_id': self.dest_rse_id,
        'source_rse_id': self.source_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name1,
        'account': self.account,
        'scope': self.scope,
        'rule_id': generate_uuid(),
        'retry_count': 1,
        'requested_at': datetime.now().replace(year=2018),
        'attributes': {
            'activity': self.user_activity,
            'bytes': 1,
            'md5': '',
            'adler32': ''
        }
    }, {
        'dest_rse_id': self.dest_rse_id,
        'source_rse_id': self.source_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'requested_at': datetime.now().replace(year=2020),
        'name': name2,
        'account': self.account,
        'scope': self.scope,
        'rule_id': generate_uuid(),
        'retry_count': 1,
        'attributes': {
            'activity': self.user_activity,
            'bytes': 1,
            'md5': '',
            'adler32': ''
        }
    }]
    queue_requests(requests, session=self.db_session)
    self.db_session.commit()
    throttler.run(once=True, sleep_time=1)
    request = get_request_by_did(self.scope, name1, self.dest_rse_id)
    assert_equal(request['state'], constants.RequestState.WAITING)
    request2 = get_request_by_did(self.scope, name2, self.dest_rse_id)
    assert_equal(request2['state'], constants.RequestState.WAITING)
@classmethod
def setUpClass(cls):
    cls.db_session = session.get_session()
    cls.dest_rse = 'MOCK'
    cls.source_rse = 'MOCK4'
    cls.dest_rse_id = get_rse_id(cls.dest_rse)
    cls.source_rse_id = get_rse_id(cls.source_rse)
    cls.scope = InternalScope('mock')
    cls.account = InternalAccount('root')
    cls.user_activity = 'User Subscription'
    cls.all_activities = 'all_activities'
    set_rse_transfer_limits(cls.dest_rse_id, cls.user_activity, max_transfers=1, session=cls.db_session)
    set('throttler_release_strategy', 'dest_%s' % cls.dest_rse_id, 'fifo', session=cls.db_session)
@classmethod
def setUpClass(cls):
    cls.db_session = session.get_session()
    cls.dialect = cls.db_session.bind.dialect.name
    cls.dest_rse = 'MOCK'
    cls.source_rse = 'MOCK4'
    cls.dest_rse_id = get_rse_id(cls.dest_rse)
    cls.source_rse_id = get_rse_id(cls.source_rse)
    cls.scope = InternalScope('mock')
    cls.account = InternalAccount('root')
    cls.user_activity = 'User Subscription'
    cls.all_activities = 'all_activities'
    set('throttler_release_strategy', 'dest_%s' % cls.dest_rse_id, 'grouped_fifo')
def test_throttler_fifo_release_subset(self):
    """ THROTTLER (CLIENTS): throttler release subset of waiting requests (fifo). """
    # Two waiting requests and no active requests, but the threshold is 1 -> release only 1 request.
    set('throttler', '%s,%s' % (self.user_activity, self.dest_rse), 1, session=self.db_session)
    name1 = generate_uuid()
    name2 = generate_uuid()
    add_replica(self.source_rse_id, self.scope, name1, 1, self.account, session=self.db_session)
    add_replica(self.source_rse_id, self.scope, name2, 1, self.account, session=self.db_session)
    requests = [{
        'dest_rse_id': self.dest_rse_id,
        'source_rse_id': self.source_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name1,
        'account': self.account,
        'scope': self.scope,
        'rule_id': generate_uuid(),
        'retry_count': 1,
        'requested_at': datetime.now().replace(year=2018),
        'attributes': {
            'activity': self.user_activity,
            'bytes': 1,
            'md5': '',
            'adler32': ''
        }
    }, {
        'dest_rse_id': self.dest_rse_id,
        'source_rse_id': self.source_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'requested_at': datetime.now().replace(year=2020),
        'name': name2,
        'account': self.account,
        'scope': self.scope,
        'rule_id': generate_uuid(),
        'retry_count': 1,
        'attributes': {
            'activity': self.user_activity,
            'bytes': 1,
            'md5': '',
            'adler32': ''
        }
    }]
    queue_requests(requests, session=self.db_session)
    self.db_session.commit()
    throttler.run(once=True, sleep_time=1)
    request = get_request_by_did(self.scope, name1, self.dest_rse_id)
    assert_equal(request['state'], constants.RequestState.QUEUED)
    request2 = get_request_by_did(self.scope, name2, self.dest_rse_id)
    assert_equal(request2['state'], constants.RequestState.WAITING)
@pytest.fixture
def dest_throttler(db_session, mock_request):
    config.set('throttler', 'mode', 'DEST_PER_ACT', session=db_session)
    set_rse_transfer_limits(mock_request.dest_rse_id, activity=mock_request.activity, max_transfers=1, strategy='fifo', session=db_session)
    db_session.commit()
    yield
    # Teardown: remove the transfer limit and the throttler mode again.
    db_session.query(models.RSETransferLimit).filter_by(rse_id=mock_request.dest_rse_id).delete()
    config.remove_option('throttler', 'mode', session=db_session)
    db_session.commit()
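# A hypothetical consumer of the dest_throttler fixture above: the fixture
# installs a per-activity FIFO limit of one transfer on the destination RSE,
# so requests beyond the limit should stay WAITING after a throttler run.
# The test body and the mock_request attributes used here are a sketch, not
# an existing test.
def test_example_dest_throttler(dest_throttler, mock_request):
    throttler.run(once=True, sleep_time=1)
    request = get_request_by_did(mock_request.scope, mock_request.name, mock_request.dest_rse_id)
    assert request['state'] in (constants.RequestState.QUEUED, constants.RequestState.WAITING)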
def set(section, option, value, issuer=None, vo='def', session=None):
    """
    Set the given option to the specified value.

    :param section: The name of the section.
    :param option: The name of the option.
    :param value: The content of the value.
    :param issuer: The issuer account.
    :param vo: The VO to act on.
    :param session: The database session in use.
    """
    kwargs = {'issuer': issuer, 'section': section, 'option': option, 'value': value}
    if not permission.has_permission(issuer=issuer, vo=vo, action='config_set', kwargs=kwargs, session=session):
        raise exception.AccessDenied('%s cannot set option %s to %s in section %s' % (issuer, option, value, section))
    return config.set(section, option, value, session=session)
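# A minimal usage sketch for the permission-checked setter above; the issuer
# account and the section/option/value used here are illustrative assumptions.
def _example_config_set():
    try:
        set('lifetime_model', 'max_extension', 30, issuer='root', vo='def')
    except exception.AccessDenied:
        # Raised when the issuer does not hold the 'config_set' permission.
        pass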
@classmethod
def setUpClass(cls):
    if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        cls.vo = {'vo': 'tst'}
    else:
        cls.vo = {}
    cls.db_session = session.get_session()
    cls.dest_rse = 'MOCK'
    cls.source_rse = 'MOCK4'
    cls.dest_rse_id = get_rse_id(cls.dest_rse, **cls.vo)
    cls.source_rse_id = get_rse_id(cls.source_rse, **cls.vo)
    cls.scope = InternalScope('mock', **cls.vo)
    cls.account = InternalAccount('root', **cls.vo)
    cls.user_activity = 'User Subscription'
    cls.all_activities = 'all_activities'
    set_rse_transfer_limits(cls.dest_rse_id, cls.user_activity, max_transfers=1, session=cls.db_session)
    set('throttler_release_strategy', 'dest_%s' % cls.dest_rse_id, 'fifo', session=cls.db_session)
def test_get_and_set_section_option(self):
    """ CONFIG (CORE): Retrieve a configuration option only """
    # get and set
    section = str(generate_uuid())
    option = str(generate_uuid())
    expected_value = str(generate_uuid())
    config_core.set(section=section, option=option, value=expected_value)
    value = config_core.get(section, option, use_cache=False)
    assert_equal(value, expected_value)
    # default value
    section = str(generate_uuid())
    config_core.set(section=section, option=str(generate_uuid()), value=str(generate_uuid()))
    default_value = 'default'
    value = config_core.get(section, 'new_option', default=default_value, use_cache=False)
    assert_equal(value, default_value)
def set(section, option, value, issuer=None):
    """
    Set the given option to the specified value.

    :param section: The name of the section.
    :param option: The name of the option.
    :param value: The content of the value.
    :param issuer: The issuer account.
    """
    kwargs = {'issuer': issuer, 'section': section, 'option': option, 'value': value}
    if not permission.has_permission(issuer=issuer, action='config_set', kwargs=kwargs):
        raise exception.AccessDenied('%s cannot set option %s to %s in section %s' % (issuer, option, value, section))
    return config.set(section, option, value)
def test_throttler_grouped_fifo_subset(self):
    """ THROTTLER (CLIENTS): throttler release subset of waiting requests (grouped fifo). """
    set_rse_transfer_limits(self.dest_rse_id, self.all_activities, volume=10, max_transfers=1, session=self.db_session)
    # threshold used by the throttler
    set('throttler', '%s,%s' % (self.all_activities, self.dest_rse), 1, session=self.db_session)
    name1 = generate_uuid()
    name2 = generate_uuid()
    name3 = generate_uuid()
    name4 = generate_uuid()
    dataset_1_name = generate_uuid()
    add_did(self.scope, dataset_1_name, constants.DIDType.DATASET, self.account, session=self.db_session)
    add_replica(self.source_rse_id, self.scope, name1, 1, self.account, session=self.db_session)
    add_replica(self.source_rse_id, self.scope, name2, 1, self.account, session=self.db_session)
    add_replica(self.source_rse_id, self.scope, name3, 1, self.account, session=self.db_session)
    add_replica(self.source_rse_id, self.scope, name4, 1, self.account, session=self.db_session)
    attach_dids(self.scope, dataset_1_name, [{'name': name1, 'scope': self.scope}], self.account, session=self.db_session)
    attach_dids(self.scope, dataset_1_name, [{'name': name2, 'scope': self.scope}], self.account, session=self.db_session)
    requests = [{
        'source_rse_id': self.source_rse_id,
        'dest_rse_id': self.dest_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name1,
        'bytes': 1,
        'scope': self.scope,
        'retry_count': 1,
        'rule_id': generate_uuid(),
        'requested_at': datetime.now().replace(year=2000),
        'attributes': {
            'activity': self.user_activity,
            'bytes': 1,
            'md5': '',
            'adler32': ''
        }
    }, {
        'source_rse_id': self.source_rse_id,
        'dest_rse_id': self.dest_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name2,
        'bytes': 2,
        'requested_at': datetime.now().replace(year=2020),
        'rule_id': generate_uuid(),
        'scope': self.scope,
        'retry_count': 1,
        'attributes': {
            'activity': self.user_activity,
            'bytes': 2,
            'md5': '',
            'adler32': ''
        }
    }, {
        'source_rse_id': self.source_rse_id,
        'dest_rse_id': self.dest_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name3,
        'bytes': 3,
        'requested_at': datetime.now().replace(year=2021),  # requested after the request below but small enough for the max_volume check
        'rule_id': generate_uuid(),
        'scope': self.scope,
        'retry_count': 1,
        'attributes': {
            'activity': self.user_activity,
            'bytes': 3,
            'md5': '',
            'adler32': ''
        }
    }, {
        'source_rse_id': self.source_rse_id,
        'dest_rse_id': self.dest_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name4,
        'bytes': 3000,
        'requested_at': datetime.now().replace(year=2020),
        'rule_id': generate_uuid(),
        'scope': self.scope,
        'retry_count': 1,
        'attributes': {
            'activity': self.user_activity,
            'bytes': 3000,
            'md5': '',
            'adler32': ''
        }
    }]
    queue_requests(requests, session=self.db_session)
    self.db_session.commit()
    throttler.run(once=True, sleep_time=1)
    # released because it got requested first
    request_1 = get_request_by_did(self.scope, name1, self.dest_rse_id)
    assert_equal(request_1['state'], constants.RequestState.QUEUED)
    # released because the DID is attached to the same dataset
    request_2 = get_request_by_did(self.scope, name2, self.dest_rse_id)
    assert_equal(request_2['state'], constants.RequestState.QUEUED)
    # released because of available volume
    request_3 = get_request_by_did(self.scope, name3, self.dest_rse_id)
    assert_equal(request_3['state'], constants.RequestState.QUEUED)
    # still waiting because there is no free volume left
    request_4 = get_request_by_did(self.scope, name4, self.dest_rse_id)
    assert_equal(request_4['state'], constants.RequestState.WAITING)
def test_throttler_grouped_fifo_nothing(self):
    """ THROTTLER (CLIENTS): throttler releases nothing (grouped fifo). """
    # Four waiting requests and one active request, but the threshold is 1:
    # more than 80% of the transfer limit is already used -> release nothing.
    set('throttler', '%s,%s' % (self.all_activities, self.dest_rse), 1, session=self.db_session)
    request = models.Request(dest_rse_id=self.dest_rse_id, bytes=2, activity=self.user_activity, state=constants.RequestState.SUBMITTED)
    request.save(session=self.db_session)
    name1 = generate_uuid()
    name2 = generate_uuid()
    name3 = generate_uuid()
    name4 = generate_uuid()
    dataset_1_name = generate_uuid()
    add_did(self.scope, dataset_1_name, constants.DIDType.DATASET, self.account, session=self.db_session)
    add_replica(self.source_rse_id, self.scope, name1, 1, self.account, session=self.db_session)
    add_replica(self.source_rse_id, self.scope, name2, 1, self.account, session=self.db_session)
    add_replica(self.source_rse_id, self.scope, name3, 1, self.account, session=self.db_session)
    add_replica(self.source_rse_id, self.scope, name4, 1, self.account, session=self.db_session)
    attach_dids(self.scope, dataset_1_name, [{'name': name1, 'scope': self.scope}], self.account, session=self.db_session)
    attach_dids(self.scope, dataset_1_name, [{'name': name2, 'scope': self.scope}], self.account, session=self.db_session)
    requests = [{
        'source_rse_id': self.source_rse_id,
        'dest_rse_id': self.dest_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name1,
        'bytes': 1,
        'scope': self.scope,
        'retry_count': 1,
        'rule_id': generate_uuid(),
        'requested_at': datetime.now().replace(year=2000),
        'attributes': {
            'activity': self.user_activity,
            'bytes': 1,
            'md5': '',
            'adler32': ''
        }
    }, {
        'source_rse_id': self.source_rse_id,
        'dest_rse_id': self.dest_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name2,
        'bytes': 2,
        'requested_at': datetime.now().replace(year=2020),
        'rule_id': generate_uuid(),
        'scope': self.scope,
        'retry_count': 1,
        'attributes': {
            'activity': self.user_activity,
            'bytes': 2,
            'md5': '',
            'adler32': ''
        }
    }, {
        'source_rse_id': self.source_rse_id,
        'dest_rse_id': self.dest_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name3,
        'bytes': 3,
        'requested_at': datetime.now().replace(year=2021),  # requested after the request below but small enough for the max_volume check
        'rule_id': generate_uuid(),
        'scope': self.scope,
        'retry_count': 1,
        'attributes': {
            'activity': self.user_activity,
            'bytes': 3,
            'md5': '',
            'adler32': ''
        }
    }, {
        'source_rse_id': self.source_rse_id,
        'dest_rse_id': self.dest_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name4,
        'bytes': 3000,
        'requested_at': datetime.now().replace(year=2020),
        'rule_id': generate_uuid(),
        'scope': self.scope,
        'retry_count': 1,
        'attributes': {
            'activity': self.user_activity,
            'bytes': 3000,
            'md5': '',
            'adler32': ''
        }
    }]
    queue_requests(requests, session=self.db_session)
    self.db_session.commit()
    throttler.run(once=True, sleep_time=1)
    request_1 = get_request_by_did(self.scope, name1, self.dest_rse_id)
    assert_equal(request_1['state'], constants.RequestState.WAITING)
    request_2 = get_request_by_did(self.scope, name2, self.dest_rse_id)
    assert_equal(request_2['state'], constants.RequestState.WAITING)
    request_3 = get_request_by_did(self.scope, name3, self.dest_rse_id)
    assert_equal(request_3['state'], constants.RequestState.WAITING)
    request_4 = get_request_by_did(self.scope, name4, self.dest_rse_id)
    assert_equal(request_4['state'], constants.RequestState.WAITING)
def test_multihop_sources_created(rse_factory, did_factory, root_account, core_config_mock, caches_mock):
    """ Ensure that multihop transfers are handled and intermediate requests are correctly created """
    src_rse_name, src_rse_id = rse_factory.make_posix_rse()
    _, jump_rse1_id = rse_factory.make_posix_rse()
    _, jump_rse2_id = rse_factory.make_posix_rse()
    _, jump_rse3_id = rse_factory.make_posix_rse()
    dst_rse_name, dst_rse_id = rse_factory.make_posix_rse()
    jump_rses = [jump_rse1_id, jump_rse2_id, jump_rse3_id]
    all_rses = jump_rses + [src_rse_id, dst_rse_id]
    for rse_id in jump_rses:
        rse_core.add_rse_attribute(rse_id, 'available_for_multihop', True)

    rse_tombstone_delay = 3600
    rse_multihop_tombstone_delay = 12 * 3600
    default_multihop_tombstone_delay = 24 * 3600

    # if both attributes are set, the multihop one takes precedence
    rse_core.add_rse_attribute(jump_rse1_id, 'tombstone_delay', rse_tombstone_delay)
    rse_core.add_rse_attribute(jump_rse1_id, 'multihop_tombstone_delay', rse_multihop_tombstone_delay)

    # if the multihop delay is not set on the RSE, the default multihop delay takes
    # precedence over the normal tombstone delay
    rse_core.add_rse_attribute(jump_rse2_id, 'tombstone_delay', rse_tombstone_delay)
    core_config.set(section='transfers', option='multihop_tombstone_delay', value=default_multihop_tombstone_delay)

    # if the multihop delay is set to 0, the replica will have no tombstone
    rse_core.add_rse_attribute(jump_rse3_id, 'multihop_tombstone_delay', 0)

    distance_core.add_distance(src_rse_id, jump_rse1_id, ranking=10)
    distance_core.add_distance(jump_rse1_id, jump_rse2_id, ranking=10)
    distance_core.add_distance(jump_rse2_id, jump_rse3_id, ranking=10)
    distance_core.add_distance(jump_rse3_id, dst_rse_id, ranking=10)
    did = did_factory.upload_test_file(src_rse_name)
    rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse_name,
                       grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)

    submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=None,
              transfertool='mock', transfertype='single', filter_transfertool=None)

    # Ensure that each intermediate request was correctly created
    for rse_id in jump_rses:
        assert request_core.get_request_by_did(rse_id=rse_id, **did)

    @read_session
    def __ensure_source_exists(rse_id, scope, name, session=None):
        return session.query(Source). \
            filter(Source.rse_id == rse_id). \
            filter(Source.scope == scope). \
            filter(Source.name == name). \
            one()

    # Ensure that sources were created for the transfers
    for rse_id in jump_rses + [src_rse_id]:
        __ensure_source_exists(rse_id, **did)

    # Ensure the tombstone is correctly set on the intermediate replicas
    expected_tombstone = datetime.utcnow() + timedelta(seconds=rse_multihop_tombstone_delay)
    replica = replica_core.get_replica(jump_rse1_id, **did)
    assert expected_tombstone - timedelta(minutes=5) < replica['tombstone'] < expected_tombstone + timedelta(minutes=5)

    expected_tombstone = datetime.utcnow() + timedelta(seconds=default_multihop_tombstone_delay)
    replica = replica_core.get_replica(jump_rse2_id, **did)
    assert expected_tombstone - timedelta(minutes=5) < replica['tombstone'] < expected_tombstone + timedelta(minutes=5)

    replica = replica_core.get_replica(jump_rse3_id, **did)
    assert replica['tombstone'] is None
def test_lifetime_creation_core(root_account, rse_factory, mock_scope, did_factory):
    """ Test the creation of a lifetime exception on the core side """
    nb_datatype = 3
    nb_datasets = 2 * nb_datatype
    yesterday = datetime.now() - timedelta(days=1)
    tomorrow = datetime.now() + timedelta(days=1)
    rse, rse_id = rse_factory.make_posix_rse()
    datasets = [did_factory.make_dataset() for _ in range(nb_datasets)]
    metadata = [str(uuid()) for _ in range(nb_datatype)]
    list_dids = []
    for cnt, meta in enumerate(metadata):
        dids = []
        for dataset in datasets[2 * cnt:2 * (cnt + 1)]:
            set_metadata(dataset['scope'], dataset['name'], 'datatype', meta)
            if cnt < nb_datatype - 1:
                set_metadata(dataset['scope'], dataset['name'], 'eol_at', yesterday)
            dids.append((dataset['scope'], dataset['name']))
        dids.sort()
        list_dids.append(dids)
    datasets.extend([{'scope': mock_scope, 'name': 'dataset_%s' % str(uuid()), 'did_type': DIDType.DATASET} for _ in range(2)])

    # Test with cutoff_date not defined
    try:
        config_core.remove_option('lifetime_model', 'cutoff_date')
    except (ConfigNotFound, NoSectionError):
        pass
    with pytest.raises(UnsupportedOperation):
        add_exception(datasets, root_account, pattern='wekhewfk', comments='This is a comment', expires_at=datetime.now())

    # Test with cutoff_date wrongly defined
    config_core.set(section='lifetime_model', option='cutoff_date', value='wrong_value')
    config_core.get(section='lifetime_model', option='cutoff_date', default=None, use_cache=False)
    with pytest.raises(UnsupportedOperation):
        add_exception(datasets, root_account, pattern='wekhewfk', comments='This is a comment', expires_at=datetime.now())

    # Test with cutoff_date properly defined
    tomorrow = tomorrow.strftime('%Y-%m-%d')
    config_core.set(section='lifetime_model', option='cutoff_date', value=tomorrow)
    config_core.get(section='lifetime_model', option='cutoff_date', default=None, use_cache=False)
    result = add_exception(datasets, root_account, pattern='wekhewfk', comments='This is a comment', expires_at=datetime.now())

    # Check that the non-existing DIDs are identified
    result_unknown = [(entry['scope'], entry['name']) for entry in result['unknown']]
    result_unknown.sort()
    unknown = [(entry['scope'], entry['name']) for entry in datasets[nb_datasets:nb_datasets + 2]]
    unknown.sort()
    assert result_unknown == unknown

    # Check that the DIDs not affected by the lifetime model are identified
    result_not_affected = [(entry['scope'], entry['name']) for entry in result['not_affected']]
    result_not_affected.sort()
    not_affected = list_dids[-1]
    assert result_not_affected == not_affected

    # Check that an exception was created for each datatype
    list_exceptions = list()
    for exception_id in result['exceptions']:
        dids = [(entry['scope'], entry['name']) for entry in result['exceptions'][exception_id]]
        dids.sort()
        list_exceptions.append(dids)
    for did in list_dids[:nb_datatype - 1]:
        assert did in list_exceptions
def test_bb8_full_workflow(vo, root_account, jdoe_account, rse_factory, mock_scope, did_factory):
    """ BB8: Test the rebalance rule method """
    config_core.set(section='bb8', option='allowed_accounts', value='jdoe')
    tot_rses = 4
    rses = [rse_factory.make_posix_rse() for _ in range(tot_rses)]
    rse1, rse1_id = rses[0]
    rse2, rse2_id = rses[1]
    rse3, rse3_id = rses[2]
    rse4, rse4_id = rses[3]

    # Add tags:
    # RSE 1 and 2 match the expression T1=true
    # RSE 3 and 4 match the expression T2=true
    T1 = tag_generator()
    T2 = tag_generator()
    add_rse_attribute(rse1_id, T1, True)
    add_rse_attribute(rse2_id, T1, True)
    add_rse_attribute(rse3_id, T2, True)
    add_rse_attribute(rse4_id, T2, True)

    # Add fake weights
    add_rse_attribute(rse1_id, "fakeweight", 10)
    add_rse_attribute(rse2_id, "fakeweight", 0)
    add_rse_attribute(rse3_id, "fakeweight", 0)
    add_rse_attribute(rse4_id, "fakeweight", 0)
    add_rse_attribute(rse1_id, "freespace", 1)
    add_rse_attribute(rse2_id, "freespace", 1)
    add_rse_attribute(rse3_id, "freespace", 1)
    add_rse_attribute(rse4_id, "freespace", 1)

    # Add quota
    set_local_account_limit(jdoe_account, rse1_id, -1)
    set_local_account_limit(jdoe_account, rse2_id, -1)
    set_local_account_limit(jdoe_account, rse3_id, -1)
    set_local_account_limit(jdoe_account, rse4_id, -1)
    set_local_account_limit(root_account, rse1_id, -1)
    set_local_account_limit(root_account, rse2_id, -1)
    set_local_account_limit(root_account, rse3_id, -1)
    set_local_account_limit(root_account, rse4_id, -1)

    # Invalidate the cache because the result of parse_expression is cached
    REGION.invalidate()

    tot_datasets = 4
    # Create a list of datasets
    datasets = [did_factory.make_dataset() for _ in range(tot_datasets)]
    dsn = [dataset['name'] for dataset in datasets]
    rules = list()
    base_unit = 100000000000
    nb_files1 = 7
    nb_files2 = 5
    nb_files3 = 3
    nb_files4 = 2
    file_size = 1 * base_unit
    rule_to_rebalance = None

    # Add one secondary file
    files = create_files(1, mock_scope, rse1_id, bytes_=1)
    add_rule(dids=[{'scope': mock_scope, 'name': files[0]['name']}], account=jdoe_account, copies=1,
             rse_expression=rse1, grouping='DATASET', weight=None, lifetime=-86400, locked=False, subscription_id=None)[0]
    for cnt in range(3, tot_rses):
        add_replicas(rses[cnt][1], files, jdoe_account)
        add_rule(dids=[{'scope': mock_scope, 'name': files[0]['name']}], account=jdoe_account, copies=1,
                 rse_expression=rses[cnt][0], grouping='DATASET', weight=None, lifetime=-86400, locked=False, subscription_id=None)[0]
    rule_cleaner(once=True)

    # Create dataset 1 of 700 GB and create a rule on RSE 1 and RSE 3
    files = create_files(nb_files1, mock_scope, rse1_id, bytes_=file_size)
    attach_dids(mock_scope, dsn[0], files, jdoe_account)
    rule_id = add_rule(dids=[{'scope': mock_scope, 'name': dsn[0]}], account=jdoe_account, copies=1,
                       rse_expression=rse1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]
    rules.append(rule_id)
    add_replicas(rse3_id, files, jdoe_account)
    rule_id = add_rule(dids=[{'scope': mock_scope, 'name': dsn[0]}], account=jdoe_account, copies=1,
                       rse_expression=rse3, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]
    rules.append(rule_id)

    # Create dataset 2 of 500 GB and create a rule on RSE 1 and RSE 2
    files = create_files(nb_files2, mock_scope, rse1_id, bytes_=file_size)
    attach_dids(mock_scope, dsn[1], files, jdoe_account)
    rule_id = add_rule(dids=[{'scope': mock_scope, 'name': dsn[1]}], account=jdoe_account, copies=1,
                       rse_expression=rse1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]
    rules.append(rule_id)
    add_replicas(rse2_id, files, jdoe_account)
    rule_id = add_rule(dids=[{'scope': mock_scope, 'name': dsn[1]}], account=jdoe_account, copies=1,
                       rse_expression=rse2, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]
    rules.append(rule_id)

    # Create dataset 3 of 300 GB and create a rule on RSE 1. The copy on RSE 3 is secondary
    files = create_files(nb_files3, mock_scope, rse1_id, bytes_=file_size)
    attach_dids(mock_scope, dsn[2], files, jdoe_account)
    rule_id = add_rule(dids=[{'scope': mock_scope, 'name': dsn[2]}], account=jdoe_account, copies=1,
                       rse_expression=rse1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]
    rule_to_rebalance = rule_id
    rules.append(rule_id)
    add_replicas(rse3_id, files, jdoe_account)
    rule_id = add_rule(dids=[{'scope': mock_scope, 'name': dsn[2]}], account=jdoe_account, copies=1,
                       rse_expression=rse3, grouping='DATASET', weight=None, lifetime=-86400, locked=False, subscription_id=None)[0]
    rule_cleaner(once=True)
    # The expired secondary rule must have been deleted by the rule cleaner
    with pytest.raises(RuleNotFound):
        get_rule(rule_id)

    # Create dataset 4 of 200 GB and create a rule on RSE 3. The copy on RSE 2 is secondary
    files = create_files(nb_files4, mock_scope, rse3_id, bytes_=file_size)
    attach_dids(mock_scope, dsn[3], files, jdoe_account)
    rule_id = add_rule(dids=[{'scope': mock_scope, 'name': dsn[3]}], account=jdoe_account, copies=1,
                       rse_expression=rse3, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]
    rules.append(rule_id)
    add_replicas(rse2_id, files, jdoe_account)
    rule_id = add_rule(dids=[{'scope': mock_scope, 'name': dsn[3]}], account=jdoe_account, copies=1,
                       rse_expression=rse2, grouping='DATASET', weight=None, lifetime=-86400, locked=False, subscription_id=None)[0]
    rule_cleaner(once=True)
    with pytest.raises(RuleNotFound):
        get_rule(rule_id)

    for dataset in dsn:
        set_status(mock_scope, dataset, open=False)

    for rse in rses:
        fill_rse_expired(rse[1])
        set_rse_usage(rse_id=rse[1], source='min_free_space', used=2 * base_unit, free=2 * base_unit, session=None)
        set_rse_usage(rse_id=rse[1], source='storage', used=15 * base_unit, free=2 * base_unit, session=None)
    set_rse_usage(rse_id=rse2_id, source='min_free_space', used=1 * base_unit, free=1 * base_unit, session=None)
    set_rse_usage(rse_id=rse2_id, source='storage', used=6 * base_unit, free=5 * base_unit, session=None)

    run_abacus(once=True, threads=1, fill_history_table=False, sleep_time=10)

    # Summary:
    # RSE 1 : 1500 GB primary + 1 B secondary
    tot_space = [src for src in get_rse_usage(rse1_id) if src['source'] == 'rucio'][0]
    expired = [src for src in get_rse_usage(rse1_id) if src['source'] == 'expired'][0]
    assert tot_space['used'] == (nb_files1 + nb_files2 + nb_files3) * file_size + 1
    assert expired['used'] == 1
    # RSE 2 : 500 GB primary + 200 GB secondary
    tot_space = [src for src in get_rse_usage(rse2_id) if src['source'] == 'rucio'][0]
    expired = [src for src in get_rse_usage(rse2_id) if src['source'] == 'expired'][0]
    assert tot_space['used'] == (nb_files2 + nb_files4) * file_size
    assert expired['used'] == nb_files4 * file_size

    # Total primary on T1=true : 2000 GB
    # Total secondary on T1=true : 200 GB
    # Ratio secondary / primary = 10 %
    # Ratio on RSE 1 : 0 %
    # Ratio on RSE 2 : 40 %

    # Now run BB8
    re_evaluator(once=True, sleep_time=30, did_limit=100)
    bb8_run(once=True, rse_expression='%s=true' % str(T1), move_subscriptions=False, use_dump=False, sleep_time=300, threads=1, dry_run=False)

    for rule_id in rules:
        rule = get_rule(rule_id)
        if rule_id != rule_to_rebalance:
            assert rule['child_rule_id'] is None
        else:
            assert rule['child_rule_id'] is not None
            # timedelta needed to prevent failure due to rounding effects
            assert rule['expires_at'] <= datetime.utcnow() + timedelta(seconds=1)
            child_rule_id = rule['child_rule_id']
            child_rule = get_rule(child_rule_id)
            assert child_rule['rse_expression'] == rse2
            # For teardown, delete the child rule
            update_rule(child_rule_id, {'lifetime': -86400})
    rule_cleaner(once=True)

    for dataset in dsn:
        set_metadata(mock_scope, dataset, 'lifetime', -86400)
    undertaker.run(once=True)
def test_throttler_fifo_release_all(self):
    """ THROTTLER (CLIENTS): throttler release all waiting requests (fifo). """
    # no threshold -> release all waiting requests
    name1 = generate_uuid()
    name2 = generate_uuid()
    add_replica(self.source_rse_id, self.scope, name1, 1, self.account, session=self.db_session)
    add_replica(self.source_rse_id, self.scope, name2, 1, self.account, session=self.db_session)
    requests = [{
        'dest_rse_id': self.dest_rse_id,
        'source_rse_id': self.source_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name1,
        'account': self.account,
        'scope': self.scope,
        'rule_id': generate_uuid(),
        'retry_count': 1,
        'requested_at': datetime.now().replace(year=2018),
        'attributes': {
            'activity': self.user_activity,
            'bytes': 1,
            'md5': '',
            'adler32': ''
        }
    }, {
        'dest_rse_id': self.dest_rse_id,
        'source_rse_id': self.source_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'requested_at': datetime.now().replace(year=2020),
        'name': name2,
        'account': self.account,
        'scope': self.scope,
        'rule_id': generate_uuid(),
        'retry_count': 1,
        'attributes': {
            'activity': self.user_activity,
            'bytes': 1,
            'md5': '',
            'adler32': ''
        }
    }]
    queue_requests(requests, session=self.db_session)
    self.db_session.commit()
    throttler.run(once=True, sleep_time=1)
    request = get_request_by_did(self.scope, name1, self.dest_rse_id)
    assert_equal(request['state'], constants.RequestState.QUEUED)
    request2 = get_request_by_did(self.scope, name2, self.dest_rse_id)
    assert_equal(request2['state'], constants.RequestState.QUEUED)

    # active transfers + waiting requests are less than the threshold -> release all waiting requests
    self.db_session.query(models.Request).delete()
    self.db_session.commit()
    name1 = generate_uuid()
    add_replica(self.source_rse_id, self.scope, name1, 1, self.account, session=self.db_session)
    set('throttler', '%s,%s' % (self.user_activity, self.dest_rse), 3, session=self.db_session)
    request = models.Request(dest_rse_id=self.dest_rse_id, activity=self.user_activity, state=constants.RequestState.SUBMITTED)
    request.save(session=self.db_session)
    requests = [{
        'dest_rse_id': self.dest_rse_id,
        'source_rse_id': self.source_rse_id,
        'request_type': constants.RequestType.TRANSFER,
        'request_id': generate_uuid(),
        'name': name1,
        'account': self.account,
        'scope': self.scope,
        'rule_id': generate_uuid(),
        'retry_count': 1,
        'requested_at': datetime.now().replace(year=2018),
        'attributes': {
            'activity': self.user_activity,
            'bytes': 1,
            'md5': '',
            'adler32': ''
        }
    }]
    queue_requests(requests, session=self.db_session)
    self.db_session.commit()
    throttler.run(once=True, sleep_time=1)
    request = get_request_by_did(self.scope, name1, self.dest_rse_id)
    assert_equal(request['state'], constants.RequestState.QUEUED)