def generate_rse(endpoint, token):
    """Register a throw-away RSE for the given storage endpoint.

    Picks the WebDAV protocol implementation for https:// endpoints and SRM
    otherwise, fills in the protocol parameters parsed out of the endpoint
    URL, attaches the pilot FTS server attribute and grants root an
    unlimited quota on the new RSE.

    :param endpoint: Storage endpoint URL, e.g. 'https://host:port/path'.
    :param token: Space token; only used for SRM endpoints.
    :returns: The rsemanager info dict for the newly created RSE.
    """
    rse_name = 'RSE%s' % generate_uuid().upper()
    scheme = 'https'
    impl = 'rucio.rse.protocols.webdav.Default'
    if not endpoint.startswith('https://'):
        # Anything that is not WebDAV is assumed to be an SRM endpoint.
        scheme = 'srm'
        impl = 'rucio.rse.protocols.srm.Default'
    tmp_proto = {
        'impl': impl,
        'scheme': scheme,
        'domains': {
            'lan': {'read': 1, 'write': 1, 'delete': 1},
            'wan': {'read': 1, 'write': 1, 'delete': 1}}}
    rse.add_rse(rse_name)
    # endpoint looks like '<scheme>://<host>:<port>/<path>':
    # split(':')[1] is '//<host>' (the [2:] strips the slashes),
    # split(':')[2] is '<port>/<path>'.
    tmp_proto['hostname'] = endpoint.split(':')[1][2:]
    tmp_proto['port'] = endpoint.split(':')[2].split('/')[0]
    tmp_proto['prefix'] = '/'.join([''] + endpoint.split(':')[2].split('/')[1:])
    if scheme == 'srm':
        tmp_proto['extended_attributes'] = {'space_token': token,
                                            'web_service_path': '/srm/managerv2?SFN='}
    rse.add_protocol(rse_name, tmp_proto)
    rse.add_rse_attribute(rse_name, key='fts', value='https://fts3-pilot.cern.ch:8446')
    # -1 means an unlimited quota for root on the new RSE.
    account_limit.set_account_limit(account='root', rse_id=rsemanager.get_rse_info(rse_name)['id'], bytes=-1)
    return rsemanager.get_rse_info(rse_name)
def test_list_on_availability():
    """ RSE_EXPRESSION_PARSER (CORE) List rses based on availability filter"""
    name_writable = rse_name_generator()
    name_blocked = rse_name_generator()
    id_writable = rse.add_rse(name_writable)
    id_blocked = rse.add_rse(name_blocked)
    # Tag both RSEs with the same value so a single expression matches both.
    attr = attribute_name_generator()
    for an_id in (id_writable, id_blocked):
        rse.add_rse_attribute(an_id, attr, "de")
    rse.update_rse(id_writable, {'availability_write': True})
    rse.update_rse(id_blocked, {'availability_write': False})
    expression = "%s=de" % attr
    # Without a filter the expression resolves to both RSEs.
    matched = [entry['id'] for entry in rse_expression_parser.parse_expression(expression)]
    assert_equal(sorted(matched), sorted([id_writable, id_blocked]))
    # Filtering on write availability keeps only the writable RSE.
    writable = [entry['id'] for entry in rse_expression_parser.parse_expression(expression, {'availability_write': True})]
    assert_equal(sorted(writable), sorted([id_writable]))
    # Requesting write-blocked RSEs raises, since every match is blacklisted.
    assert_raises(RSEBlacklisted, rse_expression_parser.parse_expression, expression, {'availability_write': False})
def test_list_rules_states(self):
    """ SUBSCRIPTION (API): Test listing of rule states for subscription """
    tmp_scope = 'mock_' + uuid()[:8]
    add_scope(tmp_scope, 'root')
    site_a = 'RSE%s' % uuid().upper()
    site_b = 'RSE%s' % uuid().upper()
    add_rse(site_a)
    add_rse(site_b)
    # add a new dataset
    dsn = 'dataset-%s' % uuid()
    add_did(scope=tmp_scope, name=dsn, type=DIDType.DATASET, account='root')
    subscription_name = uuid()
    # BUG FIX: the original stored this return value in 'id' (shadowing the
    # builtin) and then immediately discarded it via the workaround below,
    # so the assignment was a dead store.
    add_subscription(name=subscription_name, account='root', filter={'account': 'root'},
                     replication_rules=[(1, 'T1_DATADISK', False, True)], lifetime=100000,
                     retroactive=0, dry_run=0, comments='This is a comment')
    # workaround until add_subscription returns the id: look it up again
    sub_id = None
    for subscription in list_subscriptions(name=subscription_name, account='root'):
        sub_id = subscription['id']
    # Add two rules, one per site, both tied to the subscription.
    add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account='root', copies=1, rse_expression=site_a,
             grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=sub_id)
    add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account='root', copies=1, rse_expression=site_b,
             grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=sub_id)
    # Every returned state row must account for both rules (field index 3).
    for r in list_subscription_rule_states(account='root', name=subscription_name):
        assert_equal(r[3], 2)
def test_list_rules_states(self):
    """ SUBSCRIPTION (API): Test listing of rule states for subscription """
    tmp_scope = 'mock_' + uuid()[:8]
    add_scope(tmp_scope, 'root')
    site_a = 'RSE%s' % uuid().upper()
    site_b = 'RSE%s' % uuid().upper()
    # Register both sites and grant root an unlimited quota on each.
    for site in (site_a, site_b):
        add_rse(site)
        set_account_limit('root', get_rse_id(site), -1)
    # A fresh dataset for the subscription to match.
    dsn = 'dataset-%s' % uuid()
    add_did(scope=tmp_scope, name=dsn, type=DIDType.DATASET, account='root')
    subscription_name = uuid()
    subid = add_subscription(name=subscription_name,
                             account='root',
                             filter={'account': ['root', ], 'scope': [tmp_scope, ]},
                             replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}],
                             lifetime=100000,
                             retroactive=0,
                             dry_run=0,
                             comments='This is a comment',
                             issuer='root')
    # One rule per site, both linked to the subscription.
    for site in (site_a, site_b):
        add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account='root', copies=1,
                 rse_expression=site, grouping='NONE', weight=None, lifetime=None,
                 locked=False, subscription_id=subid)
    # Each state row must report both rules (field index 3).
    for rule in list_subscription_rule_states(account='root', name=subscription_name):
        assert_equal(rule[3], 2)
def _make_rse(self, scheme, protocol_impl, parameters=None, add_rse_kwargs=None):
    """Create a fresh test RSE (optionally with one WAN protocol) and track it for cleanup.

    :param scheme: Protocol scheme; if falsy, no protocol is attached.
    :param protocol_impl: Protocol implementation path; if falsy, no protocol is attached.
    :param parameters: Optional overrides merged into the protocol parameters.
    :param add_rse_kwargs: Extra keyword arguments for add_rse; may pin a 'vo'.
    :returns: (rse_name, rse_id) tuple.
    """
    name = rse_name_generator()
    creation_kwargs = dict(add_rse_kwargs or {})
    # Only fall back to the fixture's VO when the caller did not pin one.
    if 'vo' not in creation_kwargs:
        creation_kwargs['vo'] = self.vo
    rse_id = rse_core.add_rse(name, **creation_kwargs)
    if scheme and protocol_impl:
        proto = {
            'scheme': scheme,
            # Hostname derived from the id so it is unique per RSE.
            'hostname': '%s.cern.ch' % rse_id,
            'port': 0,
            'prefix': '/test/',
            'impl': protocol_impl,
            'domains': {
                'wan': {
                    'read': 1,
                    'write': 1,
                    'delete': 1,
                    'third_party_copy_read': 1,
                    'third_party_copy_write': 1,
                }
            }
        }
        proto.update(parameters or {})
        rse_core.add_protocol(rse_id=rse_id, parameter=proto)
    self.created_rses.append(rse_id)
    return name, rse_id
def __init__(self):
    """Create five test RSEs sharing a country attribute, grouped under two tags."""
    names = [rse_name_generator() for _ in range(5)]
    self.rse1, self.rse2, self.rse3, self.rse4, self.rse5 = names
    ids = [rse.add_rse(name) for name in names]
    self.rse1_id, self.rse2_id, self.rse3_id, self.rse4_id, self.rse5_id = ids
    # Add Attributes: one shared attribute name, a distinct country per RSE.
    self.attribute = attribute_name_generator()
    for rse_id, country in zip(ids, ("at", "de", "fr", "uk", "us")):
        rse.add_rse_attribute(rse_id, self.attribute, country)
    # Add Tags: tag1 covers the first three RSEs, tag2 the last two.
    self.tag1 = tag_generator()
    self.tag2 = tag_generator()
    for rse_id in ids[:3]:
        rse.add_rse_attribute(rse_id, self.tag1, True)
    for rse_id in ids[3:]:
        rse.add_rse_attribute(rse_id, self.tag2, True)
    self.rse_client = RSEClient()
def test_list_on_availability(self):
    """ RSE_EXPRESSION_PARSER (CORE) List rses based on availability filter"""
    rsewrite_name = rse_name_generator()
    rsenowrite_name = rse_name_generator()
    rsewrite_id = rse.add_rse(rsewrite_name, **self.vo)
    rsenowrite_id = rse.add_rse(rsenowrite_name, **self.vo)
    # Same attribute value on both RSEs so one expression matches both.
    attribute = attribute_name_generator()
    rse.add_rse_attribute(rsewrite_id, attribute, "de")
    rse.add_rse_attribute(rsenowrite_id, attribute, "de")
    rse.update_rse(rsewrite_id, {'availability_write': True})
    rse.update_rse(rsenowrite_id, {'availability_write': False})
    # Unfiltered expression resolves to both RSEs.
    value = sorted([
        item['id']
        for item in rse_expression_parser.parse_expression(
            "%s=de" % attribute, **self.filter)
    ])
    expected = sorted([rsewrite_id, rsenowrite_id])
    assert value == expected
    # NOTE(review): 'filters' aliases self.filter, so the mutations below
    # leak into the fixture's filter dict for subsequent assertions/tests;
    # also note the first call unpacks self.filter as keyword arguments
    # while the calls below pass the dict positionally -- confirm both
    # spellings reach parse_expression's filter parameter the same way.
    filters = self.filter
    filters['availability_write'] = True
    # With availability_write=True only the writable RSE remains.
    value = sorted([
        item['id']
        for item in rse_expression_parser.parse_expression(
            "%s=de" % attribute, filters)
    ])
    expected = sorted([rsewrite_id])
    assert value == expected
    # Asking for write-blocked RSEs must raise, as every match is blocked.
    filters['availability_write'] = False
    pytest.raises(RSEWriteBlocked, rse_expression_parser.parse_expression, "%s=de" % attribute, filters)
def setup(self):
    """Create five test RSEs sharing a country attribute, grouped under two tags."""
    self.rse1 = rse_name_generator()
    self.rse2 = rse_name_generator()
    self.rse3 = rse_name_generator()
    self.rse4 = rse_name_generator()
    self.rse5 = rse_name_generator()
    self.rse1_id = rse.add_rse(self.rse1)
    self.rse2_id = rse.add_rse(self.rse2)
    self.rse3_id = rse.add_rse(self.rse3)
    self.rse4_id = rse.add_rse(self.rse4)
    self.rse5_id = rse.add_rse(self.rse5)
    # Add Attributes
    # CONSISTENCY FIX: key attributes/tags by RSE id, matching the other
    # fixtures in this file (the original passed the RSE *names* here even
    # though the ids were computed above and the sibling fixtures use ids).
    self.attribute = attribute_name_generator()
    rse.add_rse_attribute(self.rse1_id, self.attribute, "at")
    rse.add_rse_attribute(self.rse2_id, self.attribute, "de")
    rse.add_rse_attribute(self.rse3_id, self.attribute, "fr")
    rse.add_rse_attribute(self.rse4_id, self.attribute, "uk")
    rse.add_rse_attribute(self.rse5_id, self.attribute, "us")
    # Add Tags
    self.tag1 = tag_generator()
    self.tag2 = tag_generator()
    rse.add_rse_attribute(self.rse1_id, self.tag1, True)
    rse.add_rse_attribute(self.rse2_id, self.tag1, True)
    rse.add_rse_attribute(self.rse3_id, self.tag1, True)
    rse.add_rse_attribute(self.rse4_id, self.tag2, True)
    rse.add_rse_attribute(self.rse5_id, self.tag2, True)
def test_list_replica_with_domain(self):
    """ REPLICA (CORE): Add and list file replicas forcing domain"""
    tmp_rse = rse_name_generator()
    add_rse(tmp_rse)
    # Two MOCK protocols with opposite domain priorities: the port-17 door
    # prefers LAN (priority 1 beats 2), the port-18 door prefers WAN.
    protocols = [
        {
            'scheme': 'MOCK',
            'hostname': 'localhost',
            'port': 17,
            'prefix': '/i/prefer/the/lan',
            'impl': 'rucio.rse.protocols.mock.Default',
            'domains': {
                'lan': {'read': 1, 'write': 1, 'delete': 1},
                'wan': {'read': 2, 'write': 2, 'delete': 2}
            }
        },
        {
            'scheme': 'MOCK',
            'hostname': 'localhost',
            'port': 18,
            'prefix': '/i/prefer/the/wan',
            'impl': 'rucio.rse.protocols.mock.Default',
            'domains': {
                'lan': {'read': 2, 'write': 2, 'delete': 2},
                'wan': {'read': 1, 'write': 1, 'delete': 1}
            }
        },
    ]
    for p in protocols:
        add_protocol(tmp_rse, p)
    nbfiles = 3
    # BUG FIX: 'bytes' used the Python 2 long literal 1234L, which is a
    # syntax error on Python 3; a plain int is equivalent on both versions.
    files = [{
        'scope': 'mock',
        'name': 'file_%s' % generate_uuid(),
        'bytes': 1234,
        'adler32': '01234567',
        'meta': {'events': 1234}
    } for _ in range(nbfiles)]
def setUp(self):
    """Create five VO-aware RSEs with a country attribute, a numeric attribute and two tags."""
    if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
        self.filter = {'filter': self.vo}
    else:
        self.vo = {}
        self.filter = {'filter': {'vo': 'def'}}
    names = [rse_name_generator() for _ in range(5)]
    self.rse1, self.rse2, self.rse3, self.rse4, self.rse5 = names
    ids = [rse.add_rse(name, **self.vo) for name in names]
    self.rse1_id, self.rse2_id, self.rse3_id, self.rse4_id, self.rse5_id = ids
    # Add Attributes: one shared name, a distinct country per RSE.
    self.attribute = attribute_name_generator()
    for rse_id, country in zip(ids, ("at", "de", "fr", "uk", "us")):
        rse.add_rse_attribute(rse_id, self.attribute, country)
    # Add numeric Attributes: 10..50 in steps of ten.
    self.attribute_numeric = attribute_name_generator()
    for rse_id, weight in zip(ids, (10, 20, 30, 40, 50)):
        rse.add_rse_attribute(rse_id, self.attribute_numeric, weight)
    # Add Tags: tag1 on the first three RSEs, tag2 on the last two.
    self.tag1 = tag_generator()
    self.tag2 = tag_generator()
    for rse_id in ids[:3]:
        rse.add_rse_attribute(rse_id, self.tag1, True)
    for rse_id in ids[3:]:
        rse.add_rse_attribute(rse_id, self.tag2, True)
def test_abacus_collection_replica_cleanup(self):
    """ ABACUS (COLLECTION REPLICA): Test if the cleanup procedure works correctly. """
    collection_replica.run(once=True)
    db_session = session.get_session()
    rse1 = rse_name_generator()
    rse_id1 = add_rse(rse1, **self.vo)
    rse2 = rse_name_generator()
    rse_id2 = add_rse(rse2, **self.vo)
    scope = InternalScope('mock', **self.vo)
    dataset = 'dataset_%s' % generate_uuid()
    jdoe = InternalAccount('jdoe', **self.vo)
    add_did(scope, dataset, DIDType.DATASET, jdoe)

    # One collection replica of the dataset on each RSE.
    models.CollectionReplica(scope=scope, name=dataset, rse_id=rse_id1,
                             state=ReplicaState.AVAILABLE, bytes=1).save(session=db_session, flush=False)
    models.CollectionReplica(scope=scope, name=dataset, rse_id=rse_id2,
                             state=ReplicaState.AVAILABLE, bytes=1).save(session=db_session, flush=False)

    # Five pending update rows: two duplicates per RSE plus one with
    # rse_id=None. The cleanup is expected to collapse the duplicates,
    # leaving one row per distinct rse_id (rse_id1, rse_id2, None) -- hence
    # the assertion of 3 below.
    models.UpdatedCollectionReplica(scope=scope, name=dataset, rse_id=rse_id1, did_type=DIDType.DATASET).save(
        session=db_session, flush=False)
    models.UpdatedCollectionReplica(scope=scope, name=dataset, rse_id=rse_id1, did_type=DIDType.DATASET).save(
        session=db_session, flush=False)
    models.UpdatedCollectionReplica(scope=scope, name=dataset, rse_id=rse_id2, did_type=DIDType.DATASET).save(
        session=db_session, flush=False)
    models.UpdatedCollectionReplica(scope=scope, name=dataset, rse_id=rse_id2, did_type=DIDType.DATASET).save(
        session=db_session, flush=False)
    models.UpdatedCollectionReplica(scope=scope, name=dataset, rse_id=None, did_type=DIDType.DATASET).save(
        session=db_session, flush=False)
    db_session.commit()
    assert len(get_cleaned_updated_collection_replicas(1, 1)) == 3
    # Expire the dataset so later cleanup daemons can reap the fixtures.
    self.did_client.set_metadata(scope.external, dataset, 'lifetime', -1)
def test_list_archive_contents_at_rse(self):
    """ ARCHIVE (CORE): Transparent archive listing at RSE """
    # NOTE: uses xrange, i.e. this test targets Python 2.
    scope = 'mock'
    # Two RSEs with one xrootd door each, distinguishable by hostname/prefix.
    rse1 = 'APERTURE_%s' % rse_name_generator()
    add_rse(rse1)
    add_protocol(rse1, {'scheme': 'root',
                        'hostname': 'root.aperture.com',
                        'port': 1409,
                        'prefix': '//test/chamber/',
                        'impl': 'rucio.rse.protocols.xrootd.Default',
                        'domains': {
                            'lan': {'read': 1, 'write': 1, 'delete': 1},
                            'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    rse2 = 'BLACKMESA_%s' % rse_name_generator()
    add_rse(rse2)
    add_protocol(rse2, {'scheme': 'root',
                        'hostname': 'root.blackmesa.com',
                        'port': 1409,
                        'prefix': '//lambda/complex/',
                        'impl': 'rucio.rse.protocols.xrootd.Default',
                        'domains': {
                            'lan': {'read': 1, 'write': 1, 'delete': 1},
                            'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    # register archive: one zip per RSE.
    archive1 = {'scope': scope, 'name': 'cube.1.zip', 'type': 'FILE',
                'bytes': 2596, 'adler32': 'beefdead'}
    archive2 = {'scope': scope, 'name': 'cube.2.zip', 'type': 'FILE',
                'bytes': 5432, 'adler32': 'deadbeef'}
    add_replicas(rse=rse1, files=[archive1], account='root')
    add_replicas(rse=rse2, files=[archive2], account='root')
    # archived files with replicas: the same two files live in both archives.
    archived_file = [{'scope': scope, 'name': 'zippedfile-%i-%s' % (i, str(generate_uuid())), 'type': 'FILE',
                      'bytes': 4322, 'adler32': 'beefbeef'} for i in xrange(2)]
    self.dc.add_files_to_archive(scope=scope, name=archive1['name'], files=archived_file)
    self.dc.add_files_to_archive(scope=scope, name=archive2['name'], files=archived_file)
    # NOTE(review): this first listing is immediately overwritten by the
    # next assignment and its value is never used -- dead code, kept as-is.
    res = [r['pfns'] for r in self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in archived_file],
                                                    rse_expression=rse1,
                                                    resolve_archives=True)]
    # Restricting to rse1 must surface only APERTURE PFNs in the metalink...
    res = self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in archived_file],
                                metalink=True,
                                rse_expression=rse1,
                                resolve_archives=True)
    assert_in('APERTURE', res)
    assert_not_in('BLACKMESA', res)
    # ...and only BLACKMESA PFNs when restricting to rse2.
    res = self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in archived_file],
                                metalink=True,
                                rse_expression=rse2,
                                resolve_archives=True)
    assert_in('BLACKMESA', res)
    assert_not_in('APERTURE', res)
    del_rse(rse1)
    del_rse(rse2)
def setup(self):
    """Create two GCS-backed test RSEs (VO-scoped when multi-VO) with three shared files each."""
    if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
    else:
        self.vo = {}
    self.rc = client.ReplicaClient()
    self.rse1 = rse_name_generator()
    self.rse2 = rse_name_generator()
    self.rse1_id = add_rse(self.rse1, **self.vo)
    self.rse2_id = add_rse(self.rse2, **self.vo)
    # One HTTPS/gfal door per RSE; they differ only in the bucket prefix.
    add_protocol(self.rse1_id, {'scheme': 'https',
                                'hostname': 'storage.googleapis.com',
                                'port': 443,
                                'prefix': '/atlas-europe-west1/',
                                'impl': 'rucio.rse.protocols.gfal.Default',
                                'domains': {
                                    'lan': {'read': 1, 'write': 1, 'delete': 1},
                                    'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})
    add_protocol(self.rse2_id, {'scheme': 'https',
                                'hostname': 'storage.googleapis.com',
                                'port': 443,
                                'prefix': '/atlas-europe-east1/',
                                'impl': 'rucio.rse.protocols.gfal.Default',
                                'domains': {
                                    'lan': {'read': 1, 'write': 1, 'delete': 1},
                                    'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})
    # register some files there -- the same three files on both RSEs.
    self.files = [{'scope': InternalScope('mock', **self.vo),
                   'name': 'file-on-gcs_%s' % i,
                   'bytes': 1234,
                   'adler32': 'deadbeef',
                   'meta': {'events': 666}} for i in range(0, 3)]
    root = InternalAccount('root', **self.vo)
    # ignore_availability: the RSEs have no availability configured yet.
    add_replicas(rse_id=self.rse1_id, files=self.files, account=root, ignore_availability=True)
    add_replicas(rse_id=self.rse2_id, files=self.files, account=root, ignore_availability=True)

def tearDown(self):
    """Remove the replicas and RSEs created by setup()."""
    delete_replicas(rse_id=self.rse1_id, files=self.files)
    delete_replicas(rse_id=self.rse2_id, files=self.files)
    del_rse(rse_id=self.rse1_id)
    del_rse(rse_id=self.rse2_id)
def test_list_archive_contents_transparently(self):
    """ ARCHIVE (CORE): Transparent archive listing """
    # NOTE: uses xrange and subscripts dict.keys(), i.e. this targets Python 2.
    scope = 'mock'
    rse = 'APERTURE_%s' % rse_name_generator()
    add_rse(rse)
    add_protocol(rse, {'scheme': 'root',
                       'hostname': 'root.aperture.com',
                       'port': 1409,
                       'prefix': '//test/chamber/',
                       'impl': 'rucio.rse.protocols.xrootd.Default',
                       'domains': {
                           'lan': {'read': 1, 'write': 1, 'delete': 1},
                           'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    # register archive
    archive = {'scope': scope, 'name': 'weighted.storage.cube.zip', 'type': 'FILE',
               'bytes': 2596, 'adler32': 'beefdead'}
    add_replicas(rse=rse, files=[archive], account='root')
    # archived files with replicas: these exist both inside the archive and
    # as standalone replicas, so each file must list two PFNs below.
    files_with_replicas = [{'scope': scope, 'name': 'witrep-%i-%s' % (i, str(generate_uuid())), 'type': 'FILE',
                            'bytes': 1234, 'adler32': 'deadbeef'} for i in xrange(2)]
    add_replicas(rse=rse, files=files_with_replicas, account='root')
    self.dc.add_files_to_archive(scope=scope, name=archive['name'], files=files_with_replicas)
    res = [r['pfns'] for r in self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in files_with_replicas],
                                                    resolve_archives=True)]
    assert_equal(len(res), 2)
    assert_equal(len(res[0]), 2)
    assert_equal(len(res[1]), 2)
    for r in res:
        for p in r:
            # Archive-resolved PFNs carry the xrdcl.unzip query; the direct
            # replica PFNs must not.
            if r[p]['domain'] == 'zip':
                assert_in('weighted.storage.cube.zip?xrdcl.unzip=witrep-', p)
            else:
                assert_not_in('weighted.storage.cube.zip?xrdcl.unzip=witrep-', p)
    # archived files without replicas: only reachable through the archive.
    files = [{'scope': scope, 'name': 'norep-%i-%s' % (i, str(generate_uuid())), 'type': 'FILE',
              'bytes': 1234, 'adler32': 'deadbeef'} for i in xrange(2)]
    self.dc.add_files_to_archive(scope=scope, name=archive['name'], files=files)
    res = [r['pfns'] for r in self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in files],
                                                    resolve_archives=True)]
    assert_equal(len(res), 2)
    for r in res:
        assert_in('weighted.storage.cube.zip?xrdcl.unzip=norep-', r.keys()[0])
    del_rse(rse)
def add_rse(rse, issuer, vo='def', deterministic=True, volatile=False, city=None, region_code=None,
            country_name=None, continent=None, time_zone=None, ISP=None, staging_area=False,
            rse_type=None, latitude=None, longitude=None, ASN=None, availability=None):
    """
    Creates a new Rucio Storage Element(RSE).

    :param rse: The RSE name.
    :param issuer: The issuer account.
    :param vo: The VO to act on.
    :param deterministic: Boolean to know if the pfn is generated deterministically.
    :param volatile: Boolean for RSE cache.
    :param city: City for the RSE.
    :param region_code: The region code for the RSE.
    :param country_name: The country.
    :param continent: The continent.
    :param time_zone: Timezone.
    :param staging_area: staging area.
    :param ISP: Internet service provider.
    :param rse_type: RSE type.
    :param latitude: Latitude coordinate of RSE.
    :param longitude: Longitude coordinate of RSE.
    :param ASN: Access service network.
    :param availability: Availability.
    :raises AccessDenied: If the issuer may not create RSEs.
    """
    # Reject malformed RSE names before touching permissions or the database.
    validate_schema(name='rse', obj=rse)
    if not permission.has_permission(issuer=issuer, vo=vo, action='add_rse', kwargs={'rse': rse}):
        raise exception.AccessDenied('Account %s can not add RSE' % (issuer))
    # Delegate the actual creation to the core layer.
    return rse_module.add_rse(rse, vo=vo, deterministic=deterministic, volatile=volatile, city=city,
                              region_code=region_code, country_name=country_name, staging_area=staging_area,
                              continent=continent, time_zone=time_zone, ISP=ISP, rse_type=rse_type,
                              latitude=latitude, longitude=longitude, ASN=ASN, availability=availability)
def test_to_repair_a_rule_with_only_1_rse_whose_site_is_blocklisted(self):
    """ JUDGE REPAIRER: Test to repair a rule with only 1 rse whose site is blocklisted"""
    rse = rse_name_generator()
    rse_id = add_rse(rse, **self.vo)
    # Block writes on the only candidate RSE.
    update_rse(rse_id, {'availability_write': False})
    set_local_account_limit(self.jdoe, rse_id, -1)
    rule_repairer(once=True)  # Clean out the repairer
    scope = InternalScope('mock', **self.vo)
    files = create_files(4, scope, self.rse4_id, bytes=100)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.DATASET, self.jdoe)
    attach_dids(scope, dataset, files, self.jdoe)
    rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=rse,
                       grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None,
                       ignore_availability=True, activity='DebugJudge')[0]
    assert(RuleState.STUCK == get_rule(rule_id)['state'])
    rule_repairer(once=True)
    # Still STUCK: the rule was created with ignore_availability, but the
    # target remains write-blocked:
    assert(RuleState.STUCK == get_rule(rule_id)['state'])
    # Evict the cached availability entry for this RSE (keyed by the sha256
    # of its name) so the repairer sees the update below immediately.
    region = make_region().configure('dogpile.cache.memcached',
                                     expiration_time=3600,
                                     arguments={'url': config_get('cache', 'url', False, '127.0.0.1:11211'),
                                                'distributed_lock': True})
    region.delete(sha256(rse.encode()).hexdigest())
    update_rse(rse_id, {'availability_write': True})
    rule_repairer(once=True)
    # Once writable again, the repairer can move the rule forward.
    assert(RuleState.REPLICATING == get_rule(rule_id)['state'])
def __add_test_rse_and_replicas(vo, scope, rse_name, names, file_size, epoch_tombstone=False):
    """Create an RSE with the mock protocol and one already-expired replica per name.

    :param vo: VO the RSE belongs to.
    :param scope: Scope of the replicas.
    :param rse_name: Name for the new RSE.
    :param names: Iterable of file names to register.
    :param file_size: Byte size recorded for every replica.
    :param epoch_tombstone: If True, tombstone at the UNIX epoch instead of one day ago.
    :returns: (rse_name, rse_id, dids) tuple.
    """
    rse_id = rse_core.add_rse(rse_name, vo=vo)
    rse_core.add_protocol(rse_id=rse_id, parameter=__mock_protocol)

    # Replicas are created already expired so reapers will consider them.
    if epoch_tombstone:
        tombstone = datetime(year=1970, month=1, day=1)
    else:
        tombstone = datetime.utcnow() - timedelta(days=1)

    dids = [{'scope': scope, 'name': file_name} for file_name in names]
    for did in dids:
        replica_core.add_replica(rse_id=rse_id, scope=scope, name=did['name'], bytes_=file_size,
                                 tombstone=tombstone, account=InternalAccount('root', vo=vo),
                                 adler32=None, md5=None)
    return rse_name, rse_id, dids
def _make_rse(self, scheme, protocol_impl, parameters=None, add_rse_kwargs=None):
    """Create a fresh VO-scoped test RSE (optionally with one WAN protocol) and track it.

    :param scheme: Protocol scheme; if falsy, no protocol is attached.
    :param protocol_impl: Protocol implementation path; if falsy, no protocol is attached.
    :param parameters: Optional overrides merged into the protocol parameters.
    :param add_rse_kwargs: Extra keyword arguments forwarded to add_rse.
    :returns: (rse_name, rse_id) tuple.
    """
    name = rse_name_generator()
    rse_id = rse_core.add_rse(name, vo=self.vo, **(add_rse_kwargs or {}))
    if scheme and protocol_impl:
        # Hostname indexed by creation order so it is unique per test run.
        proto = {
            'scheme': scheme,
            'hostname': 'host%d' % len(self.created_rses),
            'port': 0,
            'prefix': '/test/',
            'impl': protocol_impl,
            'domains': {
                'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}
            },
            **(parameters or {})
        }
        rse_core.add_protocol(rse_id=rse_id, parameter=proto)
    self.created_rses.append(rse_id)
    return name, rse_id
def test_add_rule_with_ignore_availability(self):
    """ REPLICATION RULE (CORE): Add a replication rule with ignore_availability setting"""
    blocked_rse = rse_name_generator()
    add_rse(blocked_rse)
    # Make the only target RSE write-blocked.
    update_rse(blocked_rse, {'availability_write': False})
    scope = 'mock'
    files = create_files(3, scope, self.rse1)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
    attach_dids(scope, dataset, files, 'jdoe')
    rule_kwargs = dict(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1,
                       rse_expression=blocked_rse, grouping='NONE', weight=None,
                       lifetime=None, locked=False, subscription_id=None)
    # By default a rule onto a write-blocked RSE must be refused...
    with assert_raises(InvalidRSEExpression):
        add_rule(**rule_kwargs)[0]
    # ...but succeed when availability is explicitly ignored.
    add_rule(ignore_availability=True, **rule_kwargs)[0]
def test_list_rules_states(vo, rest_client, auth_token):
    """ SUBSCRIPTION (REST): Test listing of rule states for subscription """
    tmp_scope = InternalScope('mock_' + uuid()[:8], vo=vo)
    root = InternalAccount('root', vo=vo)
    add_scope(tmp_scope, root)
    site_a = 'RSE%s' % uuid().upper()
    site_b = 'RSE%s' % uuid().upper()
    site_a_id = add_rse(site_a, vo=vo)
    site_b_id = add_rse(site_b, vo=vo)
    # Add quota (-1 = unlimited) so the rules below can be created.
    set_local_account_limit(root, site_a_id, -1)
    set_local_account_limit(root, site_b_id, -1)
    # add a new dataset
    dsn = 'dataset-%s' % uuid()
    add_did(scope=tmp_scope, name=dsn, did_type=DIDType.DATASET, account=root)
    subscription_name = uuid()
    subid = add_subscription(name=subscription_name, account='root',
                             filter_={'account': ['root', ], 'scope': [tmp_scope.external, ]},
                             replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}],
                             lifetime=100000, retroactive=0, dry_run=0, comments='We want a shrubbery',
                             issuer='root', vo=vo)
    # Add two rules, one per site, both linked to the subscription.
    add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account=root, copies=1, rse_expression=site_a,
             grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=subid)
    add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account=root, copies=1, rse_expression=site_b,
             grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=subid)
    response = rest_client.get('/subscriptions/%s/%s/Rules/States' % ('root', subscription_name),
                               headers=headers(auth(auth_token)))
    assert response.status_code == 200
    rulestates = None
    # The endpoint streams one JSON document per line; pick out our subscription.
    for line in response.get_data(as_text=True).split('\n'):
        if line:
            rulestates = loads(line)
            if rulestates[1] == subscription_name:
                break
    assert rulestates is not None
    # Both rules must be reported for the subscription (field index 3).
    assert rulestates[3] == 2
def test_list_rules_states(self):
    """ SUBSCRIPTION (REST): Test listing of rule states for subscription """
    tmp_scope = 'mock_' + uuid()[:8]
    add_scope(tmp_scope, 'root')
    mw = []
    site_a = 'RSE%s' % uuid().upper()
    site_b = 'RSE%s' % uuid().upper()
    add_rse(site_a)
    add_rse(site_b)
    # add a new dataset
    dsn = 'dataset-%s' % uuid()
    add_did(scope=tmp_scope, name=dsn, type=DIDType.DATASET, account='root')
    subscription_name = uuid()
    # BUG FIX: the original stored this return value in 'id' (shadowing the
    # builtin) and then immediately discarded it via the workaround below.
    add_subscription(name=subscription_name, account='root', filter={'account': 'root'},
                     replication_rules=[(1, 'T1_DATADISK', False, True)], lifetime=100000,
                     retroactive=0, dry_run=0, comments='We want a shrubbery')
    # workaround until add_subscription returns the id: look it up again
    sub_id = None
    for subscription in list_subscriptions(name=subscription_name, account='root'):
        sub_id = subscription['id']
    # Add two rules, one per site, both linked to the subscription.
    add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account='root', copies=1, rse_expression=site_a,
             grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=sub_id)
    add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account='root', copies=1, rse_expression=site_b,
             grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=sub_id)
    # Authenticate as root via userpass, then query the rule-state endpoint.
    headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': '******', 'X-Rucio-Password': '******'}
    r1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
    assert_equal(r1.status, 200)
    token = str(r1.header('X-Rucio-Auth-Token'))
    headers2 = {'X-Rucio-Auth-Token': str(token)}
    r2 = TestApp(subs_app.wsgifunc(*mw)).get('/%s/%s/Rules/States' % ('root', subscription_name),
                                             headers=headers2, expect_errors=True)
    rs = None
    for line in r2.body.split('\n'):
        # BUG FIX: 'print line' was Python 2-only syntax; the call form
        # behaves identically on both Python 2 and 3.
        print(line)
        rs = loads(line)
        if rs[1] == subscription_name:
            break
    # Both rules must be reported for the subscription (field index 3).
    assert_equal(rs[3], 2)
def setUp(self):
    """Create five VO-aware RSEs sharing a country attribute, grouped under two tags."""
    if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
    else:
        self.vo = {}
    names = [rse_name_generator() for _ in range(5)]
    self.rse1, self.rse2, self.rse3, self.rse4, self.rse5 = names
    ids = [rse.add_rse(name, **self.vo) for name in names]
    self.rse1_id, self.rse2_id, self.rse3_id, self.rse4_id, self.rse5_id = ids
    # Add Attributes: one shared name, a distinct country per RSE.
    self.attribute = attribute_name_generator()
    for rse_id, country in zip(ids, ("at", "de", "fr", "uk", "us")):
        rse.add_rse_attribute(rse_id, self.attribute, country)
    # Add Tags: tag1 on the first three RSEs, tag2 on the last two.
    self.tag1 = tag_generator()
    self.tag2 = tag_generator()
    for rse_id in ids[:3]:
        rse.add_rse_attribute(rse_id, self.tag1, True)
    for rse_id in ids[3:]:
        rse.add_rse_attribute(rse_id, self.tag2, True)
    self.rse_client = RSEClient()
def setup(self):
    """Create two GCS-backed test RSEs (name-keyed API) with three shared files each."""
    self.rc = client.ReplicaClient()
    self.rse1 = rse_name_generator()
    self.rse2 = rse_name_generator()
    add_rse(self.rse1)
    add_rse(self.rse2)
    # One HTTPS/gfal door per RSE; they differ only in the bucket prefix.
    add_protocol(self.rse1, {'scheme': 'https',
                             'hostname': 'storage.googleapis.com',
                             'port': 443,
                             'prefix': '/atlas-europe-west1/',
                             'impl': 'rucio.rse.protocols.gfal.Default',
                             'domains': {
                                 'lan': {'read': 1, 'write': 1, 'delete': 1},
                                 'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})
    add_protocol(self.rse2, {'scheme': 'https',
                             'hostname': 'storage.googleapis.com',
                             'port': 443,
                             'prefix': '/atlas-europe-east1/',
                             'impl': 'rucio.rse.protocols.gfal.Default',
                             'domains': {
                                 'lan': {'read': 1, 'write': 1, 'delete': 1},
                                 'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})
    # register some files there -- the same three files on both RSEs.
    self.files = [{'scope': 'mock',
                   'name': 'file-on-gcs_%s' % i,
                   'bytes': 1234,
                   'adler32': 'deadbeef',
                   'meta': {'events': 666}} for i in range(0, 3)]
    # ignore_availability: the RSEs have no availability configured yet.
    add_replicas(rse=self.rse1, files=self.files, account='root', ignore_availability=True)
    add_replicas(rse=self.rse2, files=self.files, account='root', ignore_availability=True)

def tearDown(self):
    """Remove the replicas and RSEs created by setup()."""
    delete_replicas(rse=self.rse1, files=self.files)
    delete_replicas(rse=self.rse2, files=self.files)
    del_rse(self.rse1)
    del_rse(self.rse2)
def setUp(self):
    """Create one S3-signed RSE and one plain HTTPS RSE, each with a replica, linked by distances."""
    if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        self.vo = {'vo': get_vo()}
    else:
        self.vo = {}
    self.root = InternalAccount('root', **self.vo)

    # add an S3 storage with a replica
    self.rc = client.ReplicaClient()
    self.rses3 = rse_name_generator()
    self.rses3_id = add_rse(self.rses3, **self.vo)
    # NoRename: S3 objects are written directly under their final key.
    add_protocol(self.rses3_id, {'scheme': 'https',
                                 'hostname': 'fake-rucio.s3-eu-south-8.amazonaws.com',
                                 'port': 443,
                                 'prefix': '/',
                                 'impl': 'rucio.rse.protocols.gfal.NoRename',
                                 'domains': {
                                     'lan': {'read': 1, 'write': 1, 'delete': 1},
                                     'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})
    # sign_url marks this endpoint as requiring signed S3 URLs.
    add_rse_attribute(rse_id=self.rses3_id, key='sign_url', value='s3')
    add_rse_attribute(rse_id=self.rses3_id, key='fts', value='localhost')
    self.files3 = [{'scope': InternalScope('mock', **self.vo), 'name': 'file-on-aws',
                    'bytes': 1234, 'adler32': 'deadbeef', 'meta': {'events': 123}}]
    add_replicas(rse_id=self.rses3_id, files=self.files3, account=self.root)

    # add a non-S3 storage with a replica
    self.rsenons3 = rse_name_generator()
    self.rsenons3_id = add_rse(self.rsenons3, **self.vo)
    add_protocol(self.rsenons3_id, {'scheme': 'https',
                                    'hostname': 'somestorage.ch',
                                    'port': 1094,
                                    'prefix': '/my/prefix',
                                    'impl': 'rucio.rse.protocols.gfal.Default',
                                    'domains': {
                                        'lan': {'read': 1, 'write': 1, 'delete': 1},
                                        'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})
    add_rse_attribute(rse_id=self.rsenons3_id, key='fts', value='localhost')
    self.filenons3 = [{'scope': InternalScope('mock', **self.vo), 'name': 'file-on-storage',
                       'bytes': 1234, 'adler32': 'deadbeef', 'meta': {'events': 321}}]
    add_replicas(rse_id=self.rsenons3_id, files=self.filenons3, account=self.root)

    # set the distance both ways so transfers can be scheduled in either direction
    add_distance(self.rses3_id, self.rsenons3_id, ranking=1, agis_distance=1, geoip_distance=1)
    add_distance(self.rsenons3_id, self.rses3_id, ranking=1, agis_distance=1, geoip_distance=1)
def test_list_rules_states(self):
    """ SUBSCRIPTION (REST): Test listing of rule states for subscription """
    tmp_scope = InternalScope('mock_' + uuid()[:8])
    root = InternalAccount('root')
    add_scope(tmp_scope, root)
    mw = []
    site_a = 'RSE%s' % uuid().upper()
    site_b = 'RSE%s' % uuid().upper()
    site_a_id = add_rse(site_a)
    site_b_id = add_rse(site_b)
    # Add quota (-1 = unlimited) so the rules below can be created.
    set_account_limit(root, site_a_id, -1)
    set_account_limit(root, site_b_id, -1)
    # add a new dataset
    dsn = 'dataset-%s' % uuid()
    add_did(scope=tmp_scope, name=dsn, type=DIDType.DATASET, account=root)
    subscription_name = uuid()
    subid = add_subscription(name=subscription_name, account='root',
                             filter={'account': ['root', ], 'scope': [tmp_scope.external, ]},
                             replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}],
                             lifetime=100000, retroactive=0, dry_run=0, comments='We want a shrubbery',
                             issuer='root')
    # Add two rules, one per site, both linked to the subscription.
    add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account=root, copies=1, rse_expression=site_a,
             grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=subid)
    add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account=root, copies=1, rse_expression=site_b,
             grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=subid)
    # Authenticate as root via userpass, then query the rule-state endpoint.
    headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': '******', 'X-Rucio-Password': '******'}
    res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
    assert_equal(res1.status, 200)
    token = str(res1.header('X-Rucio-Auth-Token'))
    headers2 = {'X-Rucio-Auth-Token': str(token)}
    res2 = TestApp(subs_app.wsgifunc(*mw)).get('/%s/%s/Rules/States' % ('root', subscription_name),
                                               headers=headers2, expect_errors=True)
    # The endpoint streams one JSON document per line; pick out our subscription.
    # NOTE(review): loads() on an empty line would raise -- this relies on
    # the body containing no blank lines before the match (the pytest
    # variant of this test guards with 'if line:').
    for line in res2.body.split('\n'):
        print(line)
        rs = loads(line)
        if rs[1] == subscription_name:
            break
    # Both rules must be reported for the subscription (field index 3).
    assert_equal(rs[3], 2)
def test_atlas_archival_policy(self):
    """ UNDERTAKER (CORE): Test the atlas archival policy. """
    tmp_scope = 'mock'
    nbdatasets = 5
    nbfiles = 5

    # RSE whose name matches the LOCALGROUPDISK archival policy pattern.
    rse = 'LOCALGROUPDISK_%s' % rse_name_generator()
    add_rse(rse)
    # Unlimited quota for the rule owner on the new RSE.
    set_account_limit('jdoe', get_rse_id(rse), -1)

    # Already-expired datasets (lifetime=-1), each carrying one rule on the new RSE.
    dsns2 = [{
        'name': 'dsn_%s' % generate_uuid(),
        'scope': tmp_scope,
        'type': 'DATASET',
        'lifetime': -1,
        'rules': [{
            'account': 'jdoe',
            'copies': 1,
            'rse_expression': rse,
            'grouping': 'DATASET'
        }]
    } for i in xrange(nbdatasets)]

    add_dids(dids=dsns2, account='root')

    replicas = list()
    # NOTE(review): Python 2 syntax here (xrange, 1L). This block also looks
    # truncated — the files built below are never attached or asserted on;
    # compare with the Python 3 variant of this test elsewhere in the file.
    for dsn in dsns2:
        files = [{
            'scope': tmp_scope,
            'name': 'file_%s' % generate_uuid(),
            'bytes': 1L,
            'adler32': '0cc737eb',
            'tombstone': datetime.utcnow() + timedelta(weeks=2),
            'meta': {
                'events': 10
            }
        } for i in xrange(nbfiles)]
def test_export_import(self):
    """ IMPORT/EXPORT (REST): Test the export and import of data together to check same syntax."""
    # A fresh RSE so the exported payload is guaranteed to contain something new.
    new_rse = rse_name_generator()
    add_rse(new_rse)

    # Authenticate via userpass and extract the session token.
    mw = []
    auth_headers = {'X-Rucio-Account': 'root', 'X-Rucio-Username': '******', 'X-Rucio-Password': '******'}
    auth_response = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=auth_headers, expect_errors=True)
    token = str(auth_response.header('X-Rucio-Auth-Token'))
    request_headers = {'X-Rucio-Type': 'user', 'X-Rucio-Account': 'root', 'X-Rucio-Auth-Token': str(token)}

    # Export everything, then feed the exact same payload back into the importer;
    # a 201 proves export output is valid import input.
    export_response = TestApp(export_app.wsgifunc(*mw)).get('/', headers=request_headers, expect_errors=True)
    exported_data = parse_response(export_response.body)
    import_response = TestApp(import_app.wsgifunc(*mw)).post('/', headers=request_headers, expect_errors=True,
                                                             params=render_json(**exported_data))
    assert_equal(import_response.status, 201)
def __init__(self):
    """Provision five RSEs sharing a string attribute, a numeric attribute and two boolean tags."""
    names = [rse_name_generator() for _ in range(5)]
    (self.rse1, self.rse2, self.rse3, self.rse4, self.rse5) = names
    ids = [rse.add_rse(name) for name in names]
    (self.rse1_id, self.rse2_id, self.rse3_id, self.rse4_id, self.rse5_id) = ids

    # Add Attributes: one shared key, a distinct country value per RSE.
    self.attribute = attribute_name_generator()
    for rse_id, country in zip(ids, ("at", "de", "fr", "uk", "us")):
        rse.add_rse_attribute(rse_id, self.attribute, country)

    # Add numeric Attributes: same idea with increasing integer values.
    self.attribute_numeric = attribute_name_generator()
    for rse_id, number in zip(ids, (10, 20, 30, 40, 50)):
        rse.add_rse_attribute(rse_id, self.attribute_numeric, number)

    # Add Tags: the first three RSEs carry tag1, the last two carry tag2.
    self.tag1 = tag_generator()
    self.tag2 = tag_generator()
    for rse_id in ids[:3]:
        rse.add_rse_attribute(rse_id, self.tag1, True)
    for rse_id in ids[3:]:
        rse.add_rse_attribute(rse_id, self.tag2, True)
def setup(self):
    """Create one RSE behind a root proxy and one without, plus matching client locations and files."""
    self.rc = ReplicaClient()

    # Client pretending to sit at a site with direct (proxyless) storage access.
    self.client_location_without_proxy = {
        'ip': '192.168.0.1',
        'fqdn': 'anomalous-materials.blackmesa.com',
        'site': 'BLACKMESA'
    }
    self.rse_without_proxy = rse_name_generator()
    add_rse(self.rse_without_proxy)
    add_rse_attribute(rse=self.rse_without_proxy, key='site', value='BLACKMESA')

    # Client at a site whose storage is reached through a root proxy.
    self.client_location_with_proxy = {
        'ip': '10.0.1.1',
        'fqdn': 'test-chamber.aperture.com',
        'site': 'APERTURE'
    }
    self.rse_with_proxy = rse_name_generator()
    add_rse(self.rse_with_proxy)
    # 'root-proxy-internal' points redirection at the site-internal proxy endpoint.
    add_rse_attribute(rse=self.rse_with_proxy, key='root-proxy-internal', value='root://proxy.aperture.com:1094')
    add_rse_attribute(rse=self.rse_with_proxy, key='site', value='APERTURE')

    # NOTE(review): Python 2 literals here (1234L, xrange) — this block is py2-only.
    self.files = [{
        'scope': 'mock',
        'name': 'half-life_%s' % i,
        'bytes': 1234L,
        'adler32': 'deadbeef',
        'meta': {
            'events': 666
        }
    } for i in xrange(1, 4)]
def test_list_rses_based_on_availability(self):
    """ RSE_EXPRESSION_PARSER (CORE) List rses based on availability filter"""
    rseWRITE_name = rse_name_generator()
    rseNOWRITE_name = rse_name_generator()
    rseWRITE_id = rse.add_rse(rseWRITE_name)
    rseNOWRITE_id = rse.add_rse(rseNOWRITE_name)
    attribute = attribute_name_generator()
    # Fix: the core rse API takes RSE ids, not names (see the id-based usage of
    # add_rse_attribute/update_rse in the sibling tests of this file).
    rse.add_rse_attribute(rseWRITE_id, attribute, "de")
    rse.add_rse_attribute(rseNOWRITE_id, attribute, "de")
    rse.update_rse(rseWRITE_id, {'availability_write': True})
    rse.update_rse(rseNOWRITE_id, {'availability_write': False})
    # Without a filter both RSEs match the attribute expression.
    assert_equal(sorted([item['id'] for item in rse_expression_parser.parse_expression("%s=de" % attribute)]),
                 sorted([rseWRITE_id, rseNOWRITE_id]))
    # Filtering on availability_write=True keeps only the writable RSE.
    assert_equal(sorted([item['id'] for item in rse_expression_parser.parse_expression("%s=de" % attribute, {'availability_write': True})]),
                 sorted([rseWRITE_id]))
    # availability_write=False yields an empty result, which the parser reports as an error.
    assert_raises(InvalidRSEExpression,
                  rse_expression_parser.parse_expression,
                  "%s=de" % attribute,
                  {'availability_write': False})
def test_atlas_archival_policy(self):
    """ UNDERTAKER (CORE): Test the atlas archival policy. """
    tmp_scope = 'mock'
    nbdatasets = 5
    nbfiles = 5

    # RSE name matching the LOCALGROUPDISK archival policy pattern.
    rse_name = 'LOCALGROUPDISK_%s' % rse_name_generator()
    add_rse(rse_name)
    set_account_limit('jdoe', get_rse_id(rse_name), -1)

    # Already-expired datasets (lifetime=-1), each with one DATASET-grouped rule.
    dsns2 = []
    for _ in range(nbdatasets):
        dsns2.append({'name': 'dsn_%s' % generate_uuid(),
                      'scope': tmp_scope,
                      'type': 'DATASET',
                      'lifetime': -1,
                      'rules': [{'account': 'jdoe',
                                 'copies': 1,
                                 'rse_expression': rse_name,
                                 'grouping': 'DATASET'}]})
    add_dids(dids=dsns2, account='root')

    replicas = []
    for dataset in dsns2:
        files = [{'scope': tmp_scope,
                  'name': 'file_%s' % generate_uuid(),
                  'bytes': 1,
                  'adler32': '0cc737eb',
                  'tombstone': datetime.utcnow() + timedelta(weeks=2),
                  'meta': {'events': 10}} for _ in range(nbfiles)]
        attach_dids(scope=tmp_scope, name=dataset['name'], rse=rse_name, dids=files, account='root')
        replicas.extend(files)

    undertaker(worker_number=1, total_workers=1, once=True)

    # The replicas survive with their tombstone cleared ...
    for rep in replicas:
        assert get_replica(scope=rep['scope'], name=rep['name'], rse=rse_name)['tombstone'] is None
    # ... and each dataset was archived with exactly one rule.
    for dataset in dsns2:
        assert get_did(scope='archive', name=dataset['name'])['name'] == dataset['name']
        assert len(list(list_rules(filters={'scope': 'archive', 'name': dataset['name']}))) == 1
def test_reaper():
    """ REAPER2 (DAEMON): Test the reaper2 daemon."""
    # Multi-VO deployments need an explicit vo kwarg; note config_get is only
    # consulted when multi_vo is enabled.
    if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
    else:
        vo = {}

    # One throwaway RSE with a mock protocol so deletions are no-ops.
    rse_name = rse_name_generator()
    rse_id = rse_core.add_rse(rse_name, **vo)
    mock_protocol = {'scheme': 'MOCK',
                     'hostname': 'localhost',
                     'port': 123,
                     'prefix': '/test/reaper',
                     'impl': 'rucio.rse.protocols.mock.Default',
                     'domains': {
                         'lan': {'read': 1, 'write': 1, 'delete': 1},
                         'wan': {'read': 1, 'write': 1, 'delete': 1}}}
    rse_core.add_protocol(rse_id=rse_id, parameter=mock_protocol)

    # Populate the RSE with expired 2G replicas.
    nb_files = 30
    file_size = 2147483648  # 2G
    file_names = ['lfn' + generate_uuid() for _ in range(nb_files)]
    for file_name in file_names:
        replica_core.add_replica(rse_id=rse_id, scope=InternalScope('data13_hip', **vo),
                                 name=file_name, bytes=file_size,
                                 tombstone=datetime.utcnow() - timedelta(days=1),
                                 account=InternalAccount('root', **vo), adler32=None, md5=None)

    # Report the storage as nearly full so the reaper has work to do.
    rse_core.set_rse_usage(rse_id=rse_id, source='storage', used=nb_files * file_size, free=800)
    rse_core.set_rse_limits(rse_id=rse_id, name='MinFreeSpace', value=10737418240)
    rse_core.set_rse_limits(rse_id=rse_id, name='MaxBeingDeletedFiles', value=10)

    # Two passes of the daemon; scope the expression to the VO when one is set.
    if vo:
        include_expression = 'vo=%s&(%s)' % (vo['vo'], rse_name)
    else:
        include_expression = rse_name
    reaper(once=True, rses=[], include_rses=include_expression, exclude_rses=[])
    reaper(once=True, rses=[], include_rses=include_expression, exclude_rses=[])

    remaining = list(replica_core.list_replicas(dids=[{'scope': InternalScope('data13_hip', **vo), 'name': n} for n in file_names],
                                                rse_expression=rse_name))
    assert len(remaining) == nb_files - 5
def add_rse(rse, issuer, deterministic=True, volatile=False, city=None, region_code=None,
            country_name=None, continent=None, time_zone=None, ISP=None, staging_area=False):
    """
    Creates a new Rucio Storage Element(RSE).

    :param rse: The RSE name.
    :param issuer: The issuer account.
    :param deterministic: Boolean to know if the pfn is generated deterministically.
    :param volatile: Boolean for RSE cache.
    :param city: City for the RSE.
    :param region_code: The region code for the RSE.
    :param country_name: The country.
    :param continent: The continent.
    :param time_zone: Timezone.
    :param staging_area: staging area.
    :param ISP: Internet service provider.

    :returns: The result of the core-layer RSE creation.
    :raises AccessDenied: If the issuer lacks the 'add_rse' permission.
    """
    # Reject malformed RSE names before consulting the permission layer.
    validate_schema(name='rse', obj=rse)
    kwargs = {'rse': rse}
    if not permission.has_permission(
            issuer=issuer, action='add_rse', kwargs=kwargs):
        raise exception.AccessDenied('Account %s can not add RSE' % (issuer))

    # Delegate the actual creation to the core rse module.
    return rse_module.add_rse(rse, deterministic=deterministic, volatile=volatile, city=city,
                              region_code=region_code, country_name=country_name,
                              staging_area=staging_area, continent=continent,
                              time_zone=time_zone, ISP=ISP)
def _make_rse(self, scheme, protocol_impl):
    """Create a throwaway RSE with one WAN-capable protocol, record its id, and return (name, id)."""
    name = rse_name_generator()
    new_rse_id = rse_core.add_rse(name, vo=self.vo)
    # Hostname is derived from the current count, so it is unique per created RSE.
    wan_capabilities = {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}
    rse_core.add_protocol(rse_id=new_rse_id,
                          parameter={'scheme': scheme,
                                     'hostname': 'host%d' % len(self.created_rses),
                                     'port': 0,
                                     'prefix': '/test',
                                     'impl': protocol_impl,
                                     'domains': {'wan': wan_capabilities}})
    self.created_rses.append(new_rse_id)
    return name, new_rse_id
def add_rse(rse, issuer, deterministic=True, volatile=False, city=None, region_code=None,
            country_name=None, continent=None, time_zone=None, ISP=None, staging_area=False):
    """
    Creates a new Rucio Storage Element(RSE).

    :param rse: The RSE name.
    :param issuer: The issuer account.
    :param deterministic: Boolean to know if the pfn is generated deterministically.
    :param volatile: Boolean for RSE cache.
    :param city: City for the RSE.
    :param region_code: The region code for the RSE.
    :param country_name: The country.
    :param continent: The continent.
    :param time_zone: Timezone.
    :param staging_area: staging area.
    :param ISP: Internet service provider.
    """
    # Validate the name first, then gate on the issuer's permission.
    validate_schema(name='rse', obj=rse)
    if not permission.has_permission(issuer=issuer, action='add_rse', kwargs={'rse': rse}):
        raise exception.AccessDenied('Account %s can not add RSE' % (issuer))

    return rse_module.add_rse(rse,
                              deterministic=deterministic,
                              volatile=volatile,
                              city=city,
                              region_code=region_code,
                              country_name=country_name,
                              staging_area=staging_area,
                              continent=continent,
                              time_zone=time_zone,
                              ISP=ISP)
def setup(self):
    """Prepare one new RSE name, four pre-existing RSEs and three import payloads for export/import tests."""
    # VO/header selection for multi-VO deployments.
    if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        self.vo_header = {'X-Rucio-VO': 'tst'}
        self.vo = {'vo': 'tst'}
    else:
        self.vo_header = {}
        self.vo = {}

    # New RSE
    self.new_rse = rse_name_generator()

    # RSE 1 that already exists
    self.old_rse_1 = rse_name_generator()
    self.old_rse_id_1 = add_rse(self.old_rse_1, availability=1, region_code='DE', country_name='DE',
                                deterministic=True, volatile=True, staging_area=True, time_zone='Europe',
                                latitude='1', longitude='2', **self.vo)
    add_protocol(
        self.old_rse_id_1, {
            'scheme': 'scheme1',
            'hostname': 'hostname1',
            'port': 1000,
            'impl': 'TODO'
        })
    add_protocol(
        self.old_rse_id_1, {
            'scheme': 'scheme3',
            'hostname': 'hostname3',
            'port': 1000,
            'impl': 'TODO'
        })

    set_rse_limits(rse_id=self.old_rse_id_1, name='MaxBeingDeletedFiles', value='10')
    set_rse_limits(rse_id=self.old_rse_id_1, name='MinFreeSpace', value='10')
    add_rse_attribute(rse_id=self.old_rse_id_1, key='attr1', value='test10')
    add_rse_attribute(rse_id=self.old_rse_id_1, key='lfn2pfn_algorithm', value='test10')
    add_rse_attribute(rse_id=self.old_rse_id_1, key='verify_checksum', value=True)

    # RSE 2 that already exists
    self.old_rse_2 = rse_name_generator()
    self.old_rse_id_2 = add_rse(self.old_rse_2, **self.vo)

    # RSE 3 that already exists
    self.old_rse_3 = rse_name_generator()
    self.old_rse_id_3 = add_rse(self.old_rse_3, **self.vo)

    # RSE 4 that already exists
    self.old_rse_4 = rse_name_generator()
    self.old_rse_id_4 = add_rse(self.old_rse_4, **self.vo)

    # Distance that already exists
    add_distance(self.old_rse_id_1, self.old_rse_id_2)

    # data1: full payload — creates new_rse, overwrites old_rse_1's settings,
    # leaves old_rse_2/old_rse_3 untouched, and redefines distances from old_rse_1.
    self.data1 = {
        'rses': {
            self.new_rse: {
                'rse_type': RSEType.TAPE,
                'availability': 3,
                'city': 'NewCity',
                'region_code': 'CH',
                'country_name': 'switzerland',
                'staging_area': False,
                'time_zone': 'Europe',
                'latitude': 1,
                'longitude': 2,
                'deterministic': True,
                'volatile': False,
                'protocols': [{
                    'scheme': 'scheme',
                    'hostname': 'hostname',
                    'port': 1000,
                    'impl': 'impl'
                }],
                'attributes': {
                    'attr1': 'test'
                },
                'MinFreeSpace': 20000,
                'lfn2pfn_algorithm': 'hash2',
                'verify_checksum': False,
                'availability_delete': True,
                'availability_read': False,
                'availability_write': True
            },
            self.old_rse_1: {
                'rse_type': RSEType.TAPE,
                'deterministic': False,
                'volatile': False,
                'region_code': 'US',
                'country_name': 'US',
                'staging_area': False,
                'time_zone': 'Asia',
                'longitude': 5,
                'city': 'City',
                'availability': 2,
                'latitude': 10,
                'protocols': [{
                    'scheme': 'scheme1',
                    'hostname': 'hostname1',
                    'port': 1000,
                    'prefix': 'prefix',
                    'impl': 'impl1'
                }, {
                    'scheme': 'scheme2',
                    'hostname': 'hostname2',
                    'port': 1001,
                    'impl': 'impl'
                }],
                'attributes': {
                    'attr1': 'test1',
                    'attr2': 'test2'
                },
                'MinFreeSpace': 10000,
                'MaxBeingDeletedFiles': 1000,
                'verify_checksum': False,
                'lfn2pfn_algorithm': 'hash3',
                'availability_delete': False,
                'availability_read': False,
                'availability_write': True
            },
            self.old_rse_2: {},
            self.old_rse_3: {}
        },
        'distances': {
            self.old_rse_1: {
                self.old_rse_2: {
                    'src_rse': self.old_rse_1,
                    'dest_rse': self.old_rse_2,
                    'ranking': 10
                },
                self.old_rse_3: {
                    'src_rse': self.old_rse_1,
                    'dest_rse': self.old_rse_3,
                    'ranking': 4
                }
            }
        }
    }
    # data2: minimal payload naming only the new RSE; data3: empty distances section.
    self.data2 = {'rses': {self.new_rse: {'rse': self.new_rse}}}
    self.data3 = {'distances': {}}
def _register_endpoint(site, endpoint, token, scheme, impl):
    """Register *site* as an RSE and attach one protocol parsed from *endpoint*.

    :param site: RSE name to create.
    :param endpoint: Endpoint string shaped like 'token:scheme://host:port/path'.
    :param token: Space token for SRM endpoints (ignored for https).
    :param scheme: Protocol scheme ('https' or 'srm').
    :param impl: Dotted path of the protocol implementation class.
    """
    tmp_proto = {
        'impl': impl,
        'scheme': scheme,
        'domains': {
            'lan': {'read': 1, 'write': 1, 'delete': 1},
            'wan': {'read': 1, 'write': 1, 'delete': 1}}}
    rse.add_rse(site)
    # endpoint looks like 'token:scheme://host:port/path' — slice out each part.
    tmp_proto['hostname'] = endpoint.split(':')[1][2:]
    tmp_proto['port'] = endpoint.split(':')[2].split('/')[0]
    tmp_proto['prefix'] = '/'.join([''] + endpoint.split(':')[2].split('/')[1:])
    if scheme == 'srm':
        tmp_proto['extended_attributes'] = {'space_token': token,
                                            'web_service_path': ''}
    rse.add_protocol(site, tmp_proto)


def request_transfer(once=False, src=None, dst=None):
    """
    Main loop to request a new transfer.

    Creates two throwaway RSEs from *src* and *dst*, then repeatedly uploads a
    test file to the source and adds a rule targeting the destination until
    stopped (or after one iteration when *once* is True).

    :param once: If True, perform a single iteration and return.
    :param src: Source endpoint ('token:scheme://host:port/path').
    :param dst: Destination endpoint, same shape as *src*.
    """
    logging.info('request: starting')

    site_a = 'RSE%s' % generate_uuid().upper()
    site_b = 'RSE%s' % generate_uuid().upper()

    scheme = 'https'
    impl = 'rucio.rse.protocols.webdav.Default'
    if not src.startswith('https://'):
        scheme = 'srm'
        impl = 'rucio.rse.protocols.srm.Default'

    # The leading endpoint component is the space token.
    srctoken = src.split(':')[0]
    dsttoken = dst.split(':')[0]

    # Identical registration for both sides — factored into one helper.
    _register_endpoint(site_a, src, srctoken, scheme, impl)
    _register_endpoint(site_b, dst, dsttoken, scheme, impl)

    si = rsemanager.get_rse_info(site_a)

    session = get_session()

    logging.info('request: started')

    while not graceful_stop.is_set():
        try:
            ts = time.time()
            tmp_name = generate_uuid()

            # add a new dataset
            did.add_did(scope='mock', name='dataset-%s' % tmp_name,
                        type=DIDType.DATASET, account='root', session=session)

            # construct PFN
            pfn = rsemanager.lfns2pfns(si, lfns=[{'scope': 'mock', 'name': 'file-%s' % tmp_name}])['mock:file-%s' % tmp_name]

            # create the directories if needed
            p = rsemanager.create_protocol(si, operation='write', scheme=scheme)
            p.connect()
            try:
                p.mkdir(pfn)
            except Exception:
                # Best-effort: the directory may already exist.
                pass

            # upload the test file
            try:
                fp = os.path.dirname(config_get('injector', 'file'))
                fn = os.path.basename(config_get('injector', 'file'))
                p.put(fn, pfn, source_dir=fp)
            except Exception:
                # Narrowed from a bare 'except:' so Ctrl-C/SystemExit still stop the daemon.
                logging.critical('Could not upload, removing temporary DID: %s' % str(sys.exc_info()))
                did.delete_dids([{'scope': 'mock', 'name': 'dataset-%s' % tmp_name}], account='root', session=session)
                break

            # add the replica
            replica.add_replica(rse=site_a, scope='mock', name='file-%s' % tmp_name,
                                bytes=config_get_int('injector', 'bytes'),
                                adler32=config_get('injector', 'adler32'),
                                md5=config_get('injector', 'md5'),
                                account='root', session=session)

            # to the dataset
            did.attach_dids(scope='mock', name='dataset-%s' % tmp_name,
                            dids=[{'scope': 'mock', 'name': 'file-%s' % tmp_name,
                                   'bytes': config_get('injector', 'bytes')}],
                            account='root', session=session)

            # add rule for the dataset
            ts = time.time()
            rule.add_rule(dids=[{'scope': 'mock', 'name': 'dataset-%s' % tmp_name}],
                          account='root', copies=1, rse_expression=site_b,
                          grouping='ALL', weight=None, lifetime=None, locked=False,
                          subscription_id=None, activity='mock-injector', session=session)
            logging.info('added rule for %s for DID mock:%s' % (site_b, tmp_name))
            record_timer('daemons.mock.conveyorinjector.add_rule', (time.time()-ts)*1000)

            record_counter('daemons.mock.conveyorinjector.request_transfer')

            session.commit()
        except Exception:
            # Narrowed from a bare 'except:' — keep the daemon alive on ordinary
            # errors but let KeyboardInterrupt/SystemExit propagate.
            session.rollback()
            logging.critical(traceback.format_exc())

        if once:
            return

    logging.info('request: graceful stop requested')
    logging.info('request: graceful stop done')