def source_rse(db_session, vo, dest_rse):
    """Yield a freshly created RSE with a distance entry towards *dest_rse*.

    The RSE is generated inside *db_session*, linked to *dest_rse* with a
    fixed ranking of 5, and archived again after the consuming test finishes.
    """
    created = generate_rse(vo=vo, session=db_session)
    add_distance(created['id'], dest_rse['id'], ranking=5, session=db_session)
    db_session.commit()
    yield created
    # teardown: drop the temporary RSE and persist the change
    del_rse(created['id'], session=db_session)
    db_session.commit()
def test_list_archive_contents_at_rse(self):
    """ ARCHIVE (CORE): Transparent archive listing at RSE

    Registers one zip archive per RSE, attaches the same constituent files
    to both archives, and verifies that restricting list_replicas with an
    rse_expression only yields PFNs from the matching RSE.
    """
    scope = 'mock'
    rse1 = 'APERTURE_%s' % rse_name_generator()
    add_rse(rse1)
    add_protocol(rse1, {'scheme': 'root',
                        'hostname': 'root.aperture.com',
                        'port': 1409,
                        'prefix': '//test/chamber/',
                        'impl': 'rucio.rse.protocols.xrootd.Default',
                        'domains': {
                            'lan': {'read': 1, 'write': 1, 'delete': 1},
                            'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    rse2 = 'BLACKMESA_%s' % rse_name_generator()
    add_rse(rse2)
    add_protocol(rse2, {'scheme': 'root',
                        'hostname': 'root.blackmesa.com',
                        'port': 1409,
                        'prefix': '//lambda/complex/',
                        'impl': 'rucio.rse.protocols.xrootd.Default',
                        'domains': {
                            'lan': {'read': 1, 'write': 1, 'delete': 1},
                            'wan': {'read': 1, 'write': 1, 'delete': 1}}})

    # register archive
    archive1 = {'scope': scope, 'name': 'cube.1.zip', 'type': 'FILE',
                'bytes': 2596, 'adler32': 'beefdead'}
    archive2 = {'scope': scope, 'name': 'cube.2.zip', 'type': 'FILE',
                'bytes': 5432, 'adler32': 'deadbeef'}
    add_replicas(rse=rse1, files=[archive1], account='root')
    add_replicas(rse=rse2, files=[archive2], account='root')

    # archived files with replicas
    # fix: use range() instead of the Python2-only xrange()
    archived_file = [{'scope': scope, 'name': 'zippedfile-%i-%s' % (i, str(generate_uuid())),
                      'type': 'FILE', 'bytes': 4322, 'adler32': 'beefbeef'} for i in range(2)]
    self.dc.add_files_to_archive(scope=scope, name=archive1['name'], files=archived_file)
    self.dc.add_files_to_archive(scope=scope, name=archive2['name'], files=archived_file)

    # exercise the plain (non-metalink) listing path; the result itself is not asserted on
    res = [r['pfns'] for r in self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in archived_file],
                                                    rse_expression=rse1,
                                                    resolve_archives=True)]

    res = self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in archived_file],
                                metalink=True,
                                rse_expression=rse1,
                                resolve_archives=True)
    assert_in('APERTURE', res)
    assert_not_in('BLACKMESA', res)

    res = self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in archived_file],
                                metalink=True,
                                rse_expression=rse2,
                                resolve_archives=True)
    assert_in('BLACKMESA', res)
    assert_not_in('APERTURE', res)

    del_rse(rse1)
    del_rse(rse2)
def test_list_archive_contents_transparently(self):
    """ ARCHIVE (CORE): Transparent archive listing

    Checks that constituents of a zip archive are transparently resolved by
    list_replicas: files that also have direct replicas get both a direct
    and a 'zip'-domain PFN, files without replicas only get the archive PFN.
    """
    scope = 'mock'
    rse = 'APERTURE_%s' % rse_name_generator()
    add_rse(rse)
    add_protocol(rse, {'scheme': 'root',
                       'hostname': 'root.aperture.com',
                       'port': 1409,
                       'prefix': '//test/chamber/',
                       'impl': 'rucio.rse.protocols.xrootd.Default',
                       'domains': {
                           'lan': {'read': 1, 'write': 1, 'delete': 1},
                           'wan': {'read': 1, 'write': 1, 'delete': 1}}})

    # register archive
    archive = {'scope': scope, 'name': 'weighted.storage.cube.zip', 'type': 'FILE',
               'bytes': 2596, 'adler32': 'beefdead'}
    add_replicas(rse=rse, files=[archive], account='root')

    # archived files with replicas
    # fix: use range() instead of the Python2-only xrange()
    files_with_replicas = [{'scope': scope, 'name': 'witrep-%i-%s' % (i, str(generate_uuid())),
                            'type': 'FILE', 'bytes': 1234, 'adler32': 'deadbeef'} for i in range(2)]
    add_replicas(rse=rse, files=files_with_replicas, account='root')
    self.dc.add_files_to_archive(scope=scope, name=archive['name'], files=files_with_replicas)

    res = [r['pfns'] for r in self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in files_with_replicas],
                                                    resolve_archives=True)]
    assert_equal(len(res), 2)
    assert_equal(len(res[0]), 2)
    assert_equal(len(res[1]), 2)
    for r in res:
        for p in r:
            if r[p]['domain'] == 'zip':
                assert_in('weighted.storage.cube.zip?xrdcl.unzip=witrep-', p)
            else:
                assert_not_in('weighted.storage.cube.zip?xrdcl.unzip=witrep-', p)

    # archived files without replicas
    files = [{'scope': scope, 'name': 'norep-%i-%s' % (i, str(generate_uuid())),
              'type': 'FILE', 'bytes': 1234, 'adler32': 'deadbeef'} for i in range(2)]
    self.dc.add_files_to_archive(scope=scope, name=archive['name'], files=files)
    res = [r['pfns'] for r in self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in files],
                                                    resolve_archives=True)]
    assert_equal(len(res), 2)
    for r in res:
        # fix: dict views are not subscriptable on Python 3 (r.keys()[0] raises
        # TypeError); next(iter(r)) yields the same first key on both 2 and 3
        assert_in('weighted.storage.cube.zip?xrdcl.unzip=norep-', next(iter(r)))
    del_rse(rse)
def del_rse(rse, issuer):
    """
    Disables a RSE with the provided RSE name.

    :param rse: The RSE name.
    :param issuer: The issuer account.
    :raises AccessDenied: if the issuer lacks the 'del_rse' permission.
    """
    allowed = permission.has_permission(issuer=issuer, action='del_rse', kwargs={'rse': rse})
    if not allowed:
        raise exception.AccessDenied('Account %s can not delete RSE' % (issuer))
    return rse_module.del_rse(rse)
def cleanup(self, session=None):
    """Remove all database state tied to the RSEs this factory created.

    Deletion order matters: transfer requests/sources first, then rules
    (which cascade their locks), then replicas, then the RSE satellite
    tables, and finally the RSEs themselves.

    NOTE(review): *session* defaults to None but is used unconditionally —
    presumably a session-injecting decorator (not visible here) supplies it;
    confirm before calling directly.
    """
    if not self.created_rses:
        return
    # Cleanup Transfers
    session.query(models.Source).filter(or_(models.Source.dest_rse_id.in_(self.created_rses),
                                            models.Source.rse_id.in_(self.created_rses))).delete(synchronize_session=False)
    session.query(models.Request).filter(or_(models.Request.dest_rse_id.in_(self.created_rses),
                                             models.Request.source_rse_id.in_(self.created_rses))).delete(synchronize_session=False)
    # Cleanup Locks and Rules: find every rule holding a lock on our RSEs
    query = session.query(models.ReplicationRule.id). \
        join(models.ReplicaLock, models.ReplicationRule.id == models.ReplicaLock.rule_id). \
        filter(models.ReplicaLock.rse_id.in_(self.created_rses)).distinct()
    for rule_id, in query:
        rule_core.delete_rule(rule_id, session=session)
    # Cleanup Replicas and Parent Datasets, grouped per RSE so one
    # delete_replicas call handles each RSE's whole file list
    query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id). \
        filter(models.RSEFileAssociation.rse_id.in_(self.created_rses))
    dids_by_rse = {}
    for scope, name, rse_id in query:
        dids_by_rse.setdefault(rse_id, []).append({'scope': scope, 'name': name})
    for rse_id, dids in dids_by_rse.items():
        replica_core.delete_replicas(rse_id=rse_id, files=dids, session=session)
    # Cleanup RSEs: wipe the satellite tables referencing rse_id
    for model in (models.RSEAttrAssociation, models.RSEProtocols, models.UpdatedRSECounter,
                  models.RSEUsage, models.RSELimit, models.RSETransferLimit, models.RSEQoSAssociation):
        session.query(model).filter(model.rse_id.in_(self.created_rses)).delete(synchronize_session=False)
    session.query(models.Distance).filter(or_(models.Distance.src_rse_id.in_(self.created_rses),
                                              models.Distance.dest_rse_id.in_(self.created_rses))).delete(synchronize_session=False)
    for rse_id in self.created_rses:
        # Only archive RSE instead of deleting. Account handling code doesn't expect RSEs to ever be deleted.
        # So running test in parallel results in some tests failing on foreign key errors.
        rse_core.del_rse(rse_id, session=session)
def del_rse(rse, issuer, vo='def'):
    """
    Disables an RSE with the provided RSE name.

    :param rse: The RSE name.
    :param issuer: The issuer account.
    :param vo: The VO to act on.
    :raises AccessDenied: if the issuer lacks the 'del_rse' permission.
    """
    rse_id = rse_module.get_rse_id(rse=rse, vo=vo)
    permission_kwargs = {'rse': rse, 'rse_id': rse_id}
    allowed = permission.has_permission(issuer=issuer, vo=vo, action='del_rse',
                                        kwargs=permission_kwargs)
    if not allowed:
        raise exception.AccessDenied('Account %s can not delete RSE' % (issuer))
    return rse_module.del_rse(rse_id)
def test_replica_sorting(self):
    """ REPLICA (CORE): Test the correct sorting of the replicas across WAN and LAN

    Sets up two RSEs ('APERTURE' and 'BLACKMESA' sites), each with three
    protocols carrying distinct lan/wan priorities, then checks that
    list_replicas orders PFNs by client_location: local-site protocols come
    first via their LAN priorities, remote ones follow via WAN priorities.
    """
    self.rc = ReplicaClient()
    self.rse1 = 'APERTURE_%s' % rse_name_generator()
    self.rse2 = 'BLACKMESA_%s' % rse_name_generator()
    self.rse1_id = add_rse(self.rse1, **self.vo)
    self.rse2_id = add_rse(self.rse2, **self.vo)
    add_rse_attribute(rse_id=self.rse1_id, key='site', value='APERTURE')
    add_rse_attribute(rse_id=self.rse2_id, key='site', value='BLACKMESA')

    self.files = [{'scope': InternalScope('mock', **self.vo), 'name': 'element_0',
                   'bytes': 1234, 'adler32': 'deadbeef'}]
    root = InternalAccount('root', **self.vo)
    add_replicas(rse_id=self.rse1_id, files=self.files, account=root)
    add_replicas(rse_id=self.rse2_id, files=self.files, account=root)

    # RSE1 protocols: root (lan/wan 1), davs (lan/wan 2), gsiftp (lan disabled, wan 3)
    add_protocol(self.rse1_id, {'scheme': 'root',
                                'hostname': 'root.aperture.com',
                                'port': 1409,
                                'prefix': '//test/chamber/',
                                'impl': 'rucio.rse.protocols.xrootd.Default',
                                'domains': {
                                    'lan': {'read': 1, 'write': 1, 'delete': 1},
                                    'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    add_protocol(self.rse1_id, {'scheme': 'davs',
                                'hostname': 'davs.aperture.com',
                                'port': 443,
                                'prefix': '/test/chamber/',
                                'impl': 'rucio.rse.protocols.gfal.Default',
                                'domains': {
                                    'lan': {'read': 2, 'write': 2, 'delete': 2},
                                    'wan': {'read': 2, 'write': 2, 'delete': 2}}})
    add_protocol(self.rse1_id, {'scheme': 'gsiftp',
                                'hostname': 'gsiftp.aperture.com',
                                'port': 8446,
                                'prefix': '/test/chamber/',
                                'impl': 'rucio.rse.protocols.gfal.Default',
                                'domains': {
                                    'lan': {'read': 0, 'write': 0, 'delete': 0},
                                    'wan': {'read': 3, 'write': 3, 'delete': 3}}})
    # RSE2 protocols: gsiftp (lan 2 / wan 1), davs (lan disabled / wan 2), root (lan 1 / wan 3)
    add_protocol(self.rse2_id, {'scheme': 'gsiftp',
                                'hostname': 'gsiftp.blackmesa.com',
                                'port': 8446,
                                'prefix': '/lambda/complex/',
                                'impl': 'rucio.rse.protocols.gfal.Default',
                                'domains': {
                                    'lan': {'read': 2, 'write': 2, 'delete': 2},
                                    'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    add_protocol(self.rse2_id, {'scheme': 'davs',
                                'hostname': 'davs.blackmesa.com',
                                'port': 443,
                                'prefix': '/lambda/complex/',
                                'impl': 'rucio.rse.protocols.gfal.Default',
                                'domains': {
                                    'lan': {'read': 0, 'write': 0, 'delete': 0},
                                    'wan': {'read': 2, 'write': 2, 'delete': 2}}})
    add_protocol(self.rse2_id, {'scheme': 'root',
                                'hostname': 'root.blackmesa.com',
                                'port': 1409,
                                'prefix': '//lambda/complex/',
                                'impl': 'rucio.rse.protocols.xrootd.Default',
                                'domains': {
                                    'lan': {'read': 1, 'write': 1, 'delete': 1},
                                    'wan': {'read': 3, 'write': 3, 'delete': 3}}})

    # client at APERTURE: RSE1 protocols sorted by LAN priority first, then RSE2 by WAN
    replicas = [r for r in self.rc.list_replicas(dids=[{'scope': 'mock', 'name': f['name'], 'type': 'FILE'} for f in self.files],
                                                 schemes=['root', 'gsiftp', 'davs'],
                                                 client_location={'site': 'APERTURE'})]
    pfns = [r['pfns'] for r in replicas][0]
    assert_equal(len(pfns.keys()), 5)
    assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['domain'], 'lan')
    assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['priority'], 1)
    assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['domain'], 'lan')
    assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['priority'], 2)
    assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
    assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['priority'], 3)
    assert_equal(pfns['davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
    assert_equal(pfns['davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0']['priority'], 4)
    assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
    assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['priority'], 5)

    # client at BLACKMESA: ordering mirrors the previous case
    replicas = [r for r in self.rc.list_replicas(dids=[{'scope': 'mock', 'name': f['name'], 'type': 'FILE'} for f in self.files],
                                                 schemes=['root', 'gsiftp', 'davs'],
                                                 client_location={'site': 'BLACKMESA'})]
    pfns = [r['pfns'] for r in replicas][0]
    assert_equal(len(pfns.keys()), 5)
    assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['domain'], 'lan')
    assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['priority'], 1)
    assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['domain'], 'lan')
    assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['priority'], 2)
    assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['domain'], 'wan')
    assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['priority'], 3)
    assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['domain'], 'wan')
    assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['priority'], 4)
    assert_equal(pfns['gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0']['domain'], 'wan')
    assert_equal(pfns['gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0']['priority'], 5)

    # client at a third-party site: everything is WAN; only priority pairs
    # can be asserted because relative RSE order is not deterministic
    replicas = [r for r in self.rc.list_replicas(dids=[{'scope': 'mock', 'name': f['name'], 'type': 'FILE'} for f in self.files],
                                                 schemes=['root', 'gsiftp', 'davs'],
                                                 client_location={'site': 'XEN'})]
    pfns = [r['pfns'] for r in replicas][0]
    assert_equal(len(pfns.keys()), 6)
    # TODO: intractable until RSE sorting is enabled
    assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
    assert_in(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['priority'], [1, 2])
    assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['domain'], 'wan')
    assert_in(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['priority'], [1, 2])
    assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['domain'], 'wan')
    assert_in(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['priority'], [3, 4])
    assert_equal(pfns['davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
    assert_in(pfns['davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0']['priority'], [3, 4])
    assert_equal(pfns['gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0']['domain'], 'wan')
    assert_in(pfns['gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0']['priority'], [5, 6])
    assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
    assert_in(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['priority'], [5, 6])

    # metalink output, client at APERTURE
    ml = self.rc.list_replicas(dids=[{'scope': 'mock', 'name': f['name'], 'type': 'FILE'} for f in self.files],
                               schemes=['root', 'gsiftp', 'davs'],
                               metalink=True,
                               client_location={'site': 'APERTURE'})
    assert_in('domain="lan" priority="1" client_extract="false">root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0', ml)
    assert_in('domain="lan" priority="2" client_extract="false">davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="3" client_extract="false">gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="4" client_extract="false">davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="5" client_extract="false">root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0', ml)
    assert_not_in('priority="6"', ml)

    # metalink output, client at BLACKMESA
    ml = self.rc.list_replicas(dids=[{'scope': 'mock', 'name': f['name'], 'type': 'FILE'} for f in self.files],
                               schemes=['root', 'gsiftp', 'davs'],
                               metalink=True,
                               client_location={'site': 'BLACKMESA'})
    assert_in('domain="lan" priority="1" client_extract="false">root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0', ml)
    assert_in('domain="lan" priority="2" client_extract="false">gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="3" client_extract="false">root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="4" client_extract="false">davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="5" client_extract="false">gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0', ml)
    assert_not_in('priority="6"', ml)

    # TODO: intractable until RSE sorting is enabled
    # ml = self.rc.list_replicas(dids=[{'scope': 'mock',
    #                                   'name': f['name'],
    #                                   'type': 'FILE'} for f in self.files],
    #                            schemes=['root', 'gsiftp', 'davs'],
    #                            metalink=True,
    #                            client_location={'site': 'XEN'})
    # assert_in('domain="wan" priority="1">root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0', ml)
    # assert_in('domain="wan" priority="2">gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0', ml)
    # assert_in('domain="wan" priority="3">davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0', ml)
    # assert_in('domain="wan" priority="4">davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0', ml)
    # assert_in('domain="wan" priority="5">gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0', ml)
    # assert_in('domain="wan" priority="6">root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0', ml)
    # assert_not_in('priority="7"', ml)

    # ensure correct handling of disabled protocols (wan priorities all 0)
    add_protocol(self.rse1_id, {'scheme': 'root',
                                'hostname': 'root2.aperture.com',
                                'port': 1409,
                                'prefix': '//test/chamber/',
                                'impl': 'rucio.rse.protocols.xrootd.Default',
                                'domains': {
                                    'lan': {'read': 1, 'write': 1, 'delete': 1},
                                    'wan': {'read': 0, 'write': 0, 'delete': 0}}})
    ml = self.rc.list_replicas(dids=[{'scope': 'mock', 'name': f['name'], 'type': 'FILE'} for f in self.files],
                               schemes=['root', 'gsiftp', 'davs'],
                               metalink=True,
                               client_location={'site': 'BLACKMESA'})
    assert_in('domain="lan" priority="1" client_extract="false">root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0', ml)
    assert_in('domain="lan" priority="2" client_extract="false">gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="3" client_extract="false">root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="4" client_extract="false">davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="5" client_extract="false">gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0', ml)
    assert_not_in('priority="6"', ml)

    delete_replicas(rse_id=self.rse1_id, files=self.files)
    delete_replicas(rse_id=self.rse2_id, files=self.files)
    del_rse(self.rse1_id)
    del_rse(self.rse2_id)
def test_list_dataset_replicas_archive(self):
    """ REPLICA (CLIENT): List dataset replicas with archives.

    Constituents live only inside a zip archive; a shallow
    list_dataset_replicas reports the rule's RSE as UNAVAILABLE, while a
    deep listing resolves the archive and reports AVAILABLE states.
    """
    replica_client = ReplicaClient()
    did_client = DIDClient()
    rule_client = RuleClient()
    scope = 'mock'
    rse = 'APERTURE_%s' % rse_name_generator()
    rse_id = add_rse(rse, **self.vo)
    add_protocol(rse_id=rse_id, parameter={'scheme': 'root',
                                           'hostname': 'root.aperture.com',
                                           'port': 1409,
                                           'prefix': '//test/chamber/',
                                           'impl': 'rucio.rse.protocols.xrootd.Default',
                                           'domains': {
                                               'lan': {'read': 1, 'write': 1, 'delete': 1},
                                               'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    rse2 = 'BLACKMESA_%s' % rse_name_generator()
    rse2_id = add_rse(rse2, **self.vo)
    add_protocol(rse_id=rse2_id, parameter={'scheme': 'root',
                                            'hostname': 'root.blackmesa.com',
                                            'port': 1409,
                                            'prefix': '//underground/facility',
                                            'impl': 'rucio.rse.protocols.xrootd.Default',
                                            'domains': {
                                                'lan': {'read': 1, 'write': 1, 'delete': 1},
                                                'wan': {'read': 1, 'write': 1, 'delete': 1}}})

    # register archive
    archive = {'scope': scope, 'name': 'another.%s.zip' % generate_uuid(),
               'type': 'FILE', 'bytes': 2596, 'adler32': 'deedbeaf'}
    replica_client.add_replicas(rse=rse, files=[archive])
    replica_client.add_replicas(rse=rse2, files=[archive])

    # constituent files registered on rse2 and attached to the archive
    archived_files = [{'scope': scope, 'name': 'zippedfile-%i-%s' % (i, str(generate_uuid())),
                       'type': 'FILE', 'bytes': 4322, 'adler32': 'deaddead'} for i in range(2)]
    replica_client.add_replicas(rse=rse2, files=archived_files)
    did_client.add_files_to_archive(scope=scope, name=archive['name'], files=archived_files)

    dataset_name = 'find_me.' + str(generate_uuid())
    did_client.add_dataset(scope=scope, name=dataset_name)
    did_client.attach_dids(scope=scope, name=dataset_name, dids=archived_files)
    rule_client.add_replication_rule(dids=[{'scope': scope, 'name': dataset_name}],
                                    account='root',
                                    copies=1,
                                    rse_expression=rse,
                                    grouping='DATASET')

    # shallow listing: only the rule's RSE, where constituents have no direct replicas
    res = [r for r in replica_client.list_dataset_replicas(scope=scope, name=dataset_name)]
    assert len(res) == 1
    assert res[0]['state'] == 'UNAVAILABLE'

    # deep listing resolves the archive contents
    res = [r for r in replica_client.list_dataset_replicas(scope=scope, name=dataset_name, deep=True)]
    assert len(res) == 3
    assert res[0]['state'] == 'AVAILABLE'
    assert res[1]['state'] == 'AVAILABLE'
    assert res[2]['state'] == 'AVAILABLE'

    # NOTE(review): only rse_id is cleaned up here; rse2_id is left behind —
    # confirm whether skipping del_rse(rse2_id) is intentional.
    del_rse(rse_id)
def tearDownClass(cls):
    """Class-level teardown: drop the shared replicas from both proxy-test
    RSEs, then archive the RSEs themselves."""
    delete_replicas(rse_id=cls.rse_with_proxy_id, files=cls.files)
    delete_replicas(rse_id=cls.rse_without_proxy_id, files=cls.files)
    del_rse(cls.rse_with_proxy_id)
    del_rse(cls.rse_without_proxy_id)
def __cleanup_rses(self):
    """Archive every RSE this helper created during the test run."""
    # Only archive RSEs instead of deleting them: account handling code
    # doesn't expect RSEs to ever be deleted, so running tests in parallel
    # would otherwise fail on foreign key errors.
    for created_id in self.created_rses:
        rse_core.del_rse(created_id)
def __exit__(self, exc_type, exc_val, exc_tb):
    """Context-manager exit: archive the temporary RSE, run the optional
    teardown callback, and commit the session."""
    del_rse(rse_id=self.rse_id, session=self.db_session)
    teardown_hook = self.teardown
    if teardown_hook:
        teardown_hook(self)
    self.db_session.commit()
def root_proxy_example_data(vo):
    """Fixture data for root-proxy tests.

    Creates two RSEs — one on a site with an internal root proxy
    ('APERTURE1') and one without ('BLACKMESA1') — registers the same three
    files on both, and yields the names/files; everything is cleaned up
    after the consuming test.
    """
    rse_without_proxy = rse_name_generator()
    rse_without_proxy_id = add_rse(rse_without_proxy, vo=vo)
    add_rse_attribute(rse_id=rse_without_proxy_id,
                      key='site',
                      value='BLACKMESA1')
    rse_with_proxy = rse_name_generator()
    rse_with_proxy_id = add_rse(rse_with_proxy, vo=vo)
    add_rse_attribute(rse_id=rse_with_proxy_id,
                      key='site',
                      value='APERTURE1')

    # APERTURE1 site has an internal proxy
    config_set('root-proxy-internal', 'APERTURE1', 'proxy.aperture.com:1094')

    files = [{'scope': InternalScope('mock', vo=vo),
              'name': 'half-life_%s' % i,
              'bytes': 1234,
              'adler32': 'deadbeef',
              'meta': {'events': 666}} for i in range(1, 4)]
    for rse_id in [rse_with_proxy_id, rse_without_proxy_id]:
        add_replicas(rse_id=rse_id,
                     files=files,
                     account=InternalAccount('root', vo=vo),
                     ignore_availability=True)

    add_protocol(rse_without_proxy_id, {'scheme': 'root',
                                        'hostname': 'root.blackmesa.com',
                                        'port': 1409,
                                        'prefix': '//training/facility/',
                                        'impl': 'rucio.rse.protocols.xrootd.Default',
                                        'domains': {
                                            'lan': {'read': 1, 'write': 1, 'delete': 1},
                                            'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    add_protocol(rse_with_proxy_id, {'scheme': 'root',
                                     'hostname': 'root.aperture.com',
                                     'port': 1409,
                                     'prefix': '//test/chamber/',
                                     'impl': 'rucio.rse.protocols.xrootd.Default',
                                     'domains': {
                                         'lan': {'read': 1, 'write': 1, 'delete': 1},
                                         'wan': {'read': 1, 'write': 1, 'delete': 1}}})

    yield {'files': files,
           'rse_without_proxy': rse_without_proxy,
           'rse_with_proxy': rse_with_proxy}

    # teardown: drop replicas from both RSEs, then archive the RSEs
    for rse_id in [rse_with_proxy_id, rse_without_proxy_id]:
        delete_replicas(rse_id=rse_id, files=files)
    del_rse(rse_with_proxy_id)
    del_rse(rse_without_proxy_id)
def import_rses(rses, vo='def', session=None):
    """Import an RSE configuration mapping into the database.

    For each named RSE: create it if missing, otherwise update it in place;
    then reconcile its protocols, limits and attributes against the imported
    data. Any existing RSE not present in *rses* is deleted (flagged).

    :param rses: mapping of RSE name -> RSE property dict (may include
                 'protocols', limits and 'attributes' sub-structures).
    :param vo: The VO to act on.
    :param session: The database session in use.
    """
    new_rses = []
    for rse_name in rses:
        rse = rses[rse_name]
        if isinstance(rse.get('rse_type'), string_types):
            rse['rse_type'] = RSEType.from_string(str(rse['rse_type']))
        try:
            rse_id = rse_module.get_rse_id(rse=rse_name, vo=vo, session=session)
        except RSENotFound:
            # RSE does not exist yet: create it with the imported properties
            rse_id = rse_module.add_rse(rse=rse_name, vo=vo, deterministic=rse.get('deterministic'),
                                        volatile=rse.get('volatile'),
                                        city=rse.get('city'), region_code=rse.get('region_code'),
                                        country_name=rse.get('country_name'),
                                        staging_area=rse.get('staging_area'), continent=rse.get('continent'),
                                        time_zone=rse.get('time_zone'),
                                        ISP=rse.get('ISP'), rse_type=rse.get('rse_type'),
                                        latitude=rse.get('latitude'),
                                        longitude=rse.get('longitude'), ASN=rse.get('ASN'),
                                        availability=rse.get('availability'),
                                        session=session)
        else:
            rse_module.update_rse(rse_id=rse_id, parameters=rse, session=session)
        new_rses.append(rse_id)

        # Protocols
        new_protocols = rse.get('protocols')
        if new_protocols:
            # update existing, add missing and remove left over protocols,
            # keyed by the (scheme, hostname, port) triple
            old_protocols = [{'scheme': protocol['scheme'],
                              'hostname': protocol['hostname'],
                              'port': protocol['port']}
                             for protocol in rse_module.get_rse_protocols(rse_id=rse_id, session=session)['protocols']]
            missing_protocols = [new_protocol for new_protocol in new_protocols
                                 if {'scheme': new_protocol['scheme'],
                                     'hostname': new_protocol['hostname'],
                                     'port': new_protocol['port']} not in old_protocols]
            outdated_protocols = [new_protocol for new_protocol in new_protocols
                                  if {'scheme': new_protocol['scheme'],
                                      'hostname': new_protocol['hostname'],
                                      'port': new_protocol['port']} in old_protocols]
            new_protocols = [{'scheme': protocol['scheme'],
                              'hostname': protocol['hostname'],
                              'port': protocol['port']} for protocol in new_protocols]
            to_be_removed_protocols = [old_protocol for old_protocol in old_protocols
                                       if old_protocol not in new_protocols]
            for protocol in outdated_protocols:
                # pop the key triple so only the updatable fields remain in 'data'
                scheme = protocol['scheme']
                port = protocol['port']
                hostname = protocol['hostname']
                del protocol['scheme']
                del protocol['hostname']
                del protocol['port']
                rse_module.update_protocols(rse_id=rse_id, scheme=scheme, data=protocol,
                                            hostname=hostname, port=port, session=session)
            for protocol in missing_protocols:
                rse_module.add_protocol(rse_id=rse_id, parameter=protocol, session=session)
            for protocol in to_be_removed_protocols:
                scheme = protocol['scheme']
                port = protocol['port']
                hostname = protocol['hostname']
                rse_module.del_protocols(rse_id=rse_id, scheme=scheme, port=port,
                                         hostname=hostname, session=session)

        # Limits: replace each imported limit value
        # NOTE(review): this calls delete_rse_limit (singular) while sibling
        # code uses delete_rse_limits — confirm the correct core API name.
        old_limits = rse_module.get_rse_limits(rse_id=rse_id, session=session)
        for limit_name in ['MaxBeingDeletedFiles', 'MinFreeSpace']:
            limit = rse.get(limit_name)
            if limit:
                if limit_name in old_limits:
                    rse_module.delete_rse_limit(rse_id=rse_id, name=limit_name, session=session)
                rse_module.set_rse_limits(rse_id=rse_id, name=limit_name, value=limit, session=session)

        # Attributes: merge top-level lfn2pfn/verify_checksum into the mapping,
        # then replace each non-None attribute
        attributes = rse.get('attributes', {})
        attributes['lfn2pfn_algorithm'] = rse.get('lfn2pfn_algorithm')
        attributes['verify_checksum'] = rse.get('verify_checksum')
        old_attributes = rse_module.list_rse_attributes(rse_id=rse_id, session=session)
        for attr in attributes:
            value = attributes[attr]
            if value is not None:
                if attr in old_attributes:
                    rse_module.del_rse_attribute(rse_id=rse_id, key=attr, session=session)
                rse_module.add_rse_attribute(rse_id=rse_id, key=attr, value=value, session=session)

    # set deleted flag to RSEs that are missing in the import data
    old_rses = [old_rse['id'] for old_rse in rse_module.list_rses(session=session)]
    for old_rse in old_rses:
        if old_rse not in new_rses:
            try:
                rse_module.del_rse(rse_id=old_rse, session=session)
            except RSEOperationNotSupported:
                # deletion not possible for this RSE (best-effort cleanup)
                pass
def tearDown(self):
    """Per-test cleanup: remove the shared replicas, then archive both RSEs."""
    for rse_name in (self.rse1, self.rse2):
        delete_replicas(rse=rse_name, files=self.files)
    for rse_name in (self.rse1, self.rse2):
        del_rse(rse_name)
def tearDown(self):
    """Per-test cleanup: remove the shared replicas, then archive both RSEs."""
    for ident in (self.rse1_id, self.rse2_id):
        delete_replicas(rse_id=ident, files=self.files)
    for ident in (self.rse1_id, self.rse2_id):
        del_rse(rse_id=ident)
def tearDown(self):
    """Drop the test replicas from both proxy RSEs, then archive the RSEs."""
    delete_replicas(rse_id=self.rse_with_proxy_id, files=self.files)
    delete_replicas(rse_id=self.rse_without_proxy_id, files=self.files)
    del_rse(self.rse_with_proxy_id)
    del_rse(self.rse_without_proxy_id)
def protocols_setup(vo):
    """Fixture: two site-tagged RSEs with three protocols each, one replica.

    Builds RSEs from *base_rse_info*, registers 'element_0' on both, and adds
    root/davs/gsiftp protocols with distinct lan/wan priorities mirroring the
    replica-sorting tests. Yields the file list and enriched rse_info; tears
    everything down afterwards.
    """
    rse_info = copy.deepcopy(base_rse_info)
    files = [{'scope': InternalScope('mock', vo=vo), 'name': 'element_0',
              'bytes': 1234, 'adler32': 'deadbeef'}]
    root = InternalAccount('root', vo=vo)

    for idx in range(len(rse_info)):
        rse_info[idx]['name'] = '%s_%s' % (rse_info[idx]['site'], rse_name_generator())
        rse_info[idx]['id'] = add_rse(rse_info[idx]['name'], vo=vo)
        add_rse_attribute(rse_id=rse_info[idx]['id'], key='site', value=base_rse_info[idx]['site'])
        add_replicas(rse_id=rse_info[idx]['id'], files=files, account=root)

    # invalidate cache for parse_expression('site=…')
    rse_expression_parser.REGION.invalidate()

    # check sites: each new RSE must be resolvable via its site expression
    for idx in range(len(rse_info)):
        site_rses = rse_expression_parser.parse_expression('site=' + base_rse_info[idx]['site'])
        assert len(site_rses) > 0
        assert rse_info[idx]['id'] in [rse['id'] for rse in site_rses]

    # first RSE: root (lan/wan 1), davs (lan/wan 2), gsiftp (lan disabled, wan 3)
    add_protocol(rse_info[0]['id'], {'scheme': schemes[0],
                                     'hostname': ('root.%s' % base_rse_info[0]['address']),
                                     'port': 1409,
                                     'prefix': '//test/chamber/',
                                     'impl': 'rucio.rse.protocols.xrootd.Default',
                                     'domains': {
                                         'lan': {'read': 1, 'write': 1, 'delete': 1},
                                         'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    add_protocol(rse_info[0]['id'], {'scheme': schemes[2],
                                     'hostname': ('davs.%s' % base_rse_info[0]['address']),
                                     'port': 443,
                                     'prefix': '/test/chamber/',
                                     'impl': 'rucio.rse.protocols.gfal.Default',
                                     'domains': {
                                         'lan': {'read': 2, 'write': 2, 'delete': 2},
                                         'wan': {'read': 2, 'write': 2, 'delete': 2}}})
    add_protocol(rse_info[0]['id'], {'scheme': schemes[1],
                                     'hostname': ('gsiftp.%s' % base_rse_info[0]['address']),
                                     'port': 8446,
                                     'prefix': '/test/chamber/',
                                     'impl': 'rucio.rse.protocols.gfal.Default',
                                     'domains': {
                                         'lan': {'read': 0, 'write': 0, 'delete': 0},
                                         'wan': {'read': 3, 'write': 3, 'delete': 3}}})
    # second RSE: gsiftp (lan 2 / wan 1), davs (lan disabled / wan 2), root (lan 1 / wan 3)
    add_protocol(rse_info[1]['id'], {'scheme': schemes[1],
                                     'hostname': ('gsiftp.%s' % base_rse_info[1]['address']),
                                     'port': 8446,
                                     'prefix': '/lambda/complex/',
                                     'impl': 'rucio.rse.protocols.gfal.Default',
                                     'domains': {
                                         'lan': {'read': 2, 'write': 2, 'delete': 2},
                                         'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    add_protocol(rse_info[1]['id'], {'scheme': schemes[2],
                                     'hostname': ('davs.%s' % base_rse_info[1]['address']),
                                     'port': 443,
                                     'prefix': '/lambda/complex/',
                                     'impl': 'rucio.rse.protocols.gfal.Default',
                                     'domains': {
                                         'lan': {'read': 0, 'write': 0, 'delete': 0},
                                         'wan': {'read': 2, 'write': 2, 'delete': 2}}})
    add_protocol(rse_info[1]['id'], {'scheme': schemes[0],
                                     'hostname': ('root.%s' % base_rse_info[1]['address']),
                                     'port': 1409,
                                     'prefix': '//lambda/complex/',
                                     'impl': 'rucio.rse.protocols.xrootd.Default',
                                     'domains': {
                                         'lan': {'read': 1, 'write': 1, 'delete': 1},
                                         'wan': {'read': 3, 'write': 3, 'delete': 3}}})

    yield {'files': files, 'rse_info': rse_info}

    # teardown: drop replicas and site attributes, then archive the RSEs
    for info in rse_info:
        delete_replicas(rse_id=info['id'], files=files)
        del_rse_attribute(rse_id=info['id'], key='site')
        del_rse(info['id'])
def tearDown(self):
    """Remove the replicas from the S3 and non-S3 RSEs, then archive both."""
    for ident, file_list in ((self.rses3_id, self.files3),
                             (self.rsenons3_id, self.filenons3)):
        delete_replicas(rse_id=ident, files=file_list)
    del_rse(self.rses3_id)
    del_rse(self.rsenons3_id)
def tearDown(self):
    """Drop the test replicas from both proxy RSEs, then archive the RSEs."""
    delete_replicas(rse=self.rse_with_proxy, files=self.files)
    delete_replicas(rse=self.rse_without_proxy, files=self.files)
    del_rse(self.rse_with_proxy)
    del_rse(self.rse_without_proxy)
def import_rses(rses, rse_sync_method='edit', attr_sync_method='edit', protocol_sync_method='edit', vo='def', session=None):
    """Import an RSE configuration mapping, honouring per-category sync modes.

    Sync modes: 'append' only adds missing entries, 'edit' adds and updates,
    'hard' additionally removes entries absent from the import data.

    :param rses: mapping of RSE name -> RSE property dict.
    :param rse_sync_method: sync mode applied to the RSEs themselves.
    :param attr_sync_method: sync mode applied to RSE attributes.
    :param protocol_sync_method: sync mode applied to RSE protocols.
    :param vo: The VO to act on.
    :param session: The database session in use.
    """
    new_rses = []
    for rse_name in rses:
        rse = rses[rse_name]
        if isinstance(rse.get('rse_type'), string_types):
            rse['rse_type'] = RSEType(rse['rse_type'])

        if rse_module.rse_exists(rse_name, vo=vo, include_deleted=False, session=session):
            # RSE exists and is active
            rse_id = rse_module.get_rse_id(rse=rse_name, vo=vo, session=session)
            selected_rse_properties = {key: rse[key] for key in rse if key in rse_module.MUTABLE_RSE_PROPERTIES}
            rse_module.update_rse(rse_id=rse_id, parameters=selected_rse_properties, session=session)
        elif rse_module.rse_exists(rse_name, vo=vo, include_deleted=True, session=session):
            # RSE exists but in deleted state
            # Should only modify the RSE if importer is configured for edit or hard sync
            if rse_sync_method in ['edit', 'hard']:
                rse_id = rse_module.get_rse_id(rse=rse_name, vo=vo, include_deleted=True, session=session)
                rse_module.restore_rse(rse_id, session=session)
                selected_rse_properties = {key: rse[key] for key in rse if key in rse_module.MUTABLE_RSE_PROPERTIES}
                rse_module.update_rse(rse_id=rse_id, parameters=selected_rse_properties, session=session)
            else:
                # Config is in RSE append only mode, should not modify the disabled RSE
                continue
        else:
            rse_id = rse_module.add_rse(rse=rse_name, vo=vo, deterministic=rse.get('deterministic'),
                                        volatile=rse.get('volatile'),
                                        city=rse.get('city'), region_code=rse.get('region_code'),
                                        country_name=rse.get('country_name'),
                                        staging_area=rse.get('staging_area'), continent=rse.get('continent'),
                                        time_zone=rse.get('time_zone'),
                                        ISP=rse.get('ISP'), rse_type=rse.get('rse_type'),
                                        latitude=rse.get('latitude'),
                                        longitude=rse.get('longitude'), ASN=rse.get('ASN'),
                                        availability=rse.get('availability'),
                                        session=session)
        new_rses.append(rse_id)

        # Protocols
        new_protocols = rse.get('protocols')
        if new_protocols:
            # update existing, add missing and remove left over protocols,
            # keyed by the (scheme, hostname, port) triple
            old_protocols = [{'scheme': protocol['scheme'],
                              'hostname': protocol['hostname'],
                              'port': protocol['port']}
                             for protocol in rse_module.get_rse_protocols(rse_id=rse_id, session=session)['protocols']]
            missing_protocols = [new_protocol for new_protocol in new_protocols
                                 if {'scheme': new_protocol['scheme'],
                                     'hostname': new_protocol['hostname'],
                                     'port': new_protocol['port']} not in old_protocols]
            outdated_protocols = [new_protocol for new_protocol in new_protocols
                                  if {'scheme': new_protocol['scheme'],
                                      'hostname': new_protocol['hostname'],
                                      'port': new_protocol['port']} in old_protocols]
            new_protocols = [{'scheme': protocol['scheme'],
                              'hostname': protocol['hostname'],
                              'port': protocol['port']} for protocol in new_protocols]
            to_be_removed_protocols = [old_protocol for old_protocol in old_protocols
                                       if old_protocol not in new_protocols]

            if protocol_sync_method == 'append':
                # append mode never touches protocols that already exist
                outdated_protocols = []
            for protocol in outdated_protocols:
                # pop the key triple so only the updatable fields remain in 'data'
                scheme = protocol['scheme']
                port = protocol['port']
                hostname = protocol['hostname']
                del protocol['scheme']
                del protocol['hostname']
                del protocol['port']
                rse_module.update_protocols(rse_id=rse_id, scheme=scheme, data=protocol,
                                            hostname=hostname, port=port, session=session)

            for protocol in missing_protocols:
                rse_module.add_protocol(rse_id=rse_id, parameter=protocol, session=session)

            if protocol_sync_method == 'hard':
                # hard mode removes protocols absent from the import data
                for protocol in to_be_removed_protocols:
                    scheme = protocol['scheme']
                    port = protocol['port']
                    hostname = protocol['hostname']
                    rse_module.del_protocols(rse_id=rse_id, scheme=scheme, port=port,
                                             hostname=hostname, session=session)

        # Limits: replace each imported limit value
        old_limits = rse_module.get_rse_limits(rse_id=rse_id, session=session)
        for limit_name in ['MaxBeingDeletedFiles', 'MinFreeSpace']:
            limit = rse.get(limit_name)
            if limit:
                if limit_name in old_limits:
                    rse_module.delete_rse_limits(rse_id=rse_id, name=limit_name, session=session)
                rse_module.set_rse_limits(rse_id=rse_id, name=limit_name, value=limit, session=session)

        # Attributes: merge top-level lfn2pfn/verify_checksum into the mapping
        attributes = rse.get('attributes', {})
        attributes['lfn2pfn_algorithm'] = rse.get('lfn2pfn_algorithm')
        attributes['verify_checksum'] = rse.get('verify_checksum')

        old_attributes = rse_module.list_rse_attributes(rse_id=rse_id, session=session)
        missing_attributes = [attribute for attribute in old_attributes if attribute not in attributes]

        for attr in attributes:
            value = attributes[attr]
            if value is not None:
                if attr in old_attributes:
                    # existing attribute: replace unless in append-only mode
                    if attr_sync_method not in ['append']:
                        rse_module.del_rse_attribute(rse_id=rse_id, key=attr, session=session)
                        rse_module.add_rse_attribute(rse_id=rse_id, key=attr, value=value, session=session)
                else:
                    rse_module.add_rse_attribute(rse_id=rse_id, key=attr, value=value, session=session)
        if attr_sync_method == 'hard':
            # hard mode removes attributes absent from the import data,
            # except the attribute carrying the RSE's own name
            for attr in missing_attributes:
                if attr != rse_name:
                    rse_module.del_rse_attribute(rse_id=rse_id, key=attr, session=session)

    # set deleted flag to RSEs that are missing in the import data
    old_rses = [old_rse['id'] for old_rse in rse_module.list_rses(session=session)]
    if rse_sync_method == 'hard':
        for old_rse in old_rses:
            if old_rse not in new_rses:
                try:
                    rse_module.del_rse(rse_id=old_rse, session=session)
                except RSEOperationNotSupported:
                    # deletion not possible for this RSE (best-effort cleanup)
                    pass