def __add_test_rse_and_replicas(vo, scope, rse_name, names, file_size, epoch_tombstone=False):
    """Create a mock-protocol RSE and register one replica per name.

    :param vo: VO to create the RSE and replicas in.
    :param scope: internal scope for the replicas.
    :param rse_name: name of the RSE to create.
    :param names: iterable of file names to register.
    :param file_size: size in bytes recorded for each replica.
    :param epoch_tombstone: if True, tombstone at the Unix epoch instead of yesterday.
    :returns: tuple (rse_name, rse_id, dids) where dids is a list of {'scope', 'name'} dicts.
    """
    rse_id = rse_core.add_rse(rse_name, vo=vo)
    rse_core.add_protocol(rse_id=rse_id, parameter=__mock_protocol)
    # Replicas are created already expired so a reaper run may delete them.
    if epoch_tombstone:
        tombstone = datetime(year=1970, month=1, day=1)
    else:
        tombstone = datetime.utcnow() - timedelta(days=1)
    dids = []
    for file_name in names:
        dids.append({'scope': scope, 'name': file_name})
        replica_core.add_replica(rse_id=rse_id, scope=scope, name=file_name,
                                 bytes_=file_size, tombstone=tombstone,
                                 account=InternalAccount('root', vo=vo),
                                 adler32=None, md5=None)
    return rse_name, rse_id, dids
def generate_rse(endpoint, token):
    """Register a new RSE pointing at *endpoint* and return its RSE info.

    WebDAV is used for https:// endpoints, SRM for anything else; *token*
    is the SRM space token. An unlimited quota for 'root' is set on the
    freshly created RSE.

    :param endpoint: storage endpoint URL of the form scheme://host:port/path.
    :param token: SRM space token (only used for non-https endpoints).
    :returns: the RSE info dictionary from rsemanager.
    """
    rse_name = 'RSE%s' % generate_uuid().upper()
    if endpoint.startswith('https://'):
        scheme, impl = 'https', 'rucio.rse.protocols.webdav.Default'
    else:
        scheme, impl = 'srm', 'rucio.rse.protocols.srm.Default'
    rse_id = rse.add_rse(rse_name)
    # endpoint has the shape scheme://host:port/path...
    parts = endpoint.split(':')
    path_bits = parts[2].split('/')
    tmp_proto = {
        'impl': impl,
        'scheme': scheme,
        'domains': {
            'lan': {'read': 1, 'write': 1, 'delete': 1},
            'wan': {'read': 1, 'write': 1, 'delete': 1}},
        'hostname': parts[1][2:],  # drop the leading '//'
        'port': path_bits[0],
        'prefix': '/'.join([''] + path_bits[1:]),
    }
    if scheme == 'srm':
        tmp_proto['extended_attributes'] = {'space_token': token,
                                            'web_service_path': '/srm/managerv2?SFN='}
    rse.add_protocol(rse_id=rse_id, parameter=tmp_proto)
    rse.add_rse_attribute(rse_id=rse_id, key='fts', value='https://fts3-pilot.cern.ch:8446')
    account_limit.set_account_limit(account='root',
                                    rse_id=rsemanager.get_rse_info(rse_name)['id'],
                                    bytes=-1)
    return rsemanager.get_rse_info(rse_name)
def _make_rse(self, scheme, protocol_impl, parameters=None, add_rse_kwargs=None):
    """Create a test RSE, record its id in self.created_rses and return (name, id).

    :param scheme: protocol scheme; together with protocol_impl it decides
                   whether a WAN-enabled protocol is attached at all.
    :param protocol_impl: dotted path of the protocol implementation class.
    :param parameters: optional dict overriding any protocol field.
    :param add_rse_kwargs: extra keyword arguments forwarded to add_rse.
    """
    rse_name = rse_name_generator()
    rse_id = rse_core.add_rse(rse_name, vo=self.vo, **(add_rse_kwargs or {}))
    if scheme and protocol_impl:
        protocol = {
            'scheme': scheme,
            'hostname': 'host%d' % len(self.created_rses),
            'port': 0,
            'prefix': '/test/',
            'impl': protocol_impl,
            'domains': {
                'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}
            },
        }
        protocol.update(parameters or {})
        rse_core.add_protocol(rse_id=rse_id, parameter=protocol)
    self.created_rses.append(rse_id)
    return rse_name, rse_id
def _make_rse(self, scheme, protocol_impl, parameters=None, add_rse_kwargs=None):
    """Create a test RSE, record its id in self.created_rses and return (name, id).

    If add_rse_kwargs already carries an explicit 'vo', it wins over self.vo.

    :param scheme: protocol scheme; together with protocol_impl it decides
                   whether a protocol is attached at all.
    :param protocol_impl: dotted path of the protocol implementation class.
    :param parameters: optional dict overriding any protocol field.
    :param add_rse_kwargs: extra keyword arguments forwarded to add_rse.
    """
    rse_name = rse_name_generator()
    kwargs = add_rse_kwargs or {}
    if 'vo' in kwargs:
        rse_id = rse_core.add_rse(rse_name, **kwargs)
    else:
        rse_id = rse_core.add_rse(rse_name, vo=self.vo, **kwargs)
    if scheme and protocol_impl:
        protocol_parameters = {
            'scheme': scheme,
            'hostname': '%s.cern.ch' % rse_id,
            'port': 0,
            'prefix': '/test/',
            'impl': protocol_impl,
            'domains': {
                'wan': {
                    'read': 1,
                    'write': 1,
                    'delete': 1,
                    'third_party_copy_read': 1,
                    'third_party_copy_write': 1,
                }
            }
        }
        protocol_parameters.update(parameters or {})
        rse_core.add_protocol(rse_id=rse_id, parameter=protocol_parameters)
    self.created_rses.append(rse_id)
    return rse_name, rse_id
def create_rse_psdm():
    """Register the non-deterministic posix RSE 'PSDM_DISK' rooted at /reg/d/psdm.

    Creates the local prefix directory if needed, adds the RSE owned by
    'root', attaches a posix protocol for lan and wan, and tags it as
    not-tape via the 'istape' attribute.
    """
    prefix = "/reg/d/psdm"
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(prefix, exist_ok=True)
    params = {
        'scheme': 'file',
        'prefix': prefix,
        'impl': 'rucio.rse.protocols.posix.Default',
        'domains': {
            "lan": {"read": 1, "write": 1, "delete": 1},
            "wan": {"read": 1, "write": 1, "delete": 1}
        }
    }
    add_rse('PSDM_DISK', 'root', deterministic=False)
    add_protocol('PSDM_DISK', params)
    add_rse_attribute(rse='PSDM_DISK', key='istape', value='False')
def test_list_replica_with_domain(self):
    """ REPLICA (CORE): Add and list file replicas forcing domain"""
    tmp_rse = rse_name_generator()
    add_rse(tmp_rse)
    # Two MOCK protocols with mirrored priorities: the first prefers lan,
    # the second prefers wan, so the forced domain selects the protocol.
    protocols = [
        {'scheme': 'MOCK',
         'hostname': 'localhost',
         'port': 17,
         'prefix': '/i/prefer/the/lan',
         'impl': 'rucio.rse.protocols.mock.Default',
         'domains': {
             'lan': {'read': 1, 'write': 1, 'delete': 1},
             'wan': {'read': 2, 'write': 2, 'delete': 2}}},
        {'scheme': 'MOCK',
         'hostname': 'localhost',
         'port': 18,
         'prefix': '/i/prefer/the/wan',
         'impl': 'rucio.rse.protocols.mock.Default',
         'domains': {
             'lan': {'read': 2, 'write': 2, 'delete': 2},
             'wan': {'read': 1, 'write': 1, 'delete': 1}}},
    ]
    for p in protocols:
        add_protocol(tmp_rse, p)

    nbfiles = 3
    # BUGFIX: '1234L' was a Python 2 long literal (a SyntaxError on Python 3);
    # a plain int is correct on both. Loop variable renamed to '_' (unused).
    files = [{'scope': 'mock',
              'name': 'file_%s' % generate_uuid(),
              'bytes': 1234,
              'adler32': '01234567',
              'meta': {'events': 1234}} for _ in range(nbfiles)]
def test_list_archive_contents_transparently(self):
    """ ARCHIVE (CORE): Transparent archive listing """
    scope = InternalScope('mock', **self.vo)
    rse = 'APERTURE_%s' % rse_name_generator()
    rse_id = add_rse(rse, **self.vo)
    root = InternalAccount('root', **self.vo)

    # Single xrootd protocol — xrootd supports the '?xrdcl.unzip=' redirect
    # that the assertions below look for.
    add_protocol(rse_id, {'scheme': 'root',
                          'hostname': 'root.aperture.com',
                          'port': 1409,
                          'prefix': '//test/chamber/',
                          'impl': 'rucio.rse.protocols.xrootd.Default',
                          'domains': {
                              'lan': {'read': 1, 'write': 1, 'delete': 1},
                              'wan': {'read': 1, 'write': 1, 'delete': 1}}})

    # register archive
    archive = {'scope': scope, 'name': 'weighted.storage.cube.zip', 'type': 'FILE',
               'bytes': 2596, 'adler32': 'beefdead'}
    # Client-side copies carry the external (string) scope representation.
    archive_client = archive.copy()
    archive_client['scope'] = archive_client['scope'].external
    add_replicas(rse_id=rse_id, files=[archive], account=root)

    # archived files with replicas
    files_with_replicas = [{'scope': scope, 'name': 'witrep-%i-%s' % (i, str(generate_uuid())),
                            'type': 'FILE', 'bytes': 1234, 'adler32': 'deadbeef'}
                           for i in range(2)]
    files_with_replicas_client = []
    for f in files_with_replicas:
        new_file = f.copy()
        new_file['scope'] = new_file['scope'].external
        files_with_replicas_client.append(new_file)
    add_replicas(rse_id=rse_id, files=files_with_replicas, account=root)
    self.dc.add_files_to_archive(scope=archive_client['scope'],
                                 name=archive_client['name'],
                                 files=files_with_replicas_client)

    res = [r['pfns'] for r in self.rc.list_replicas(
        dids=[{'scope': f['scope'], 'name': f['name']} for f in files_with_replicas_client],
        resolve_archives=True)]
    assert_equal(len(res), 2)
    # Each file resolves to two pfns: its direct replica and the copy
    # reachable inside the archive.
    assert_equal(len(res[0]), 2)
    assert_equal(len(res[1]), 2)
    for r in res:
        for p in r:
            if r[p]['domain'] == 'zip':
                assert_in('weighted.storage.cube.zip?xrdcl.unzip=witrep-', p)
            else:
                assert_not_in('weighted.storage.cube.zip?xrdcl.unzip=witrep-', p)

    # archived files without replicas
    files = [{'scope': scope.external, 'name': 'norep-%i-%s' % (i, str(generate_uuid())),
              'type': 'FILE', 'bytes': 1234, 'adler32': 'deadbeef'}
             for i in range(2)]
    self.dc.add_files_to_archive(scope=archive_client['scope'],
                                 name=archive_client['name'], files=files)
    res = [r['pfns'] for r in self.rc.list_replicas(
        dids=[{'scope': f['scope'], 'name': f['name']} for f in files],
        resolve_archives=True)]
    assert_equal(len(res), 2)
    # With no direct replica the only pfn must point into the archive.
    for r in res:
        assert_in('weighted.storage.cube.zip?xrdcl.unzip=norep-', list(r.keys())[0])
def test_list_archive_contents_at_rse(self):
    """ ARCHIVE (CORE): Transparent archive listing at RSE """
    scope = 'mock'
    # Two RSEs, each holding one archive; both archives contain the same files.
    rse1 = 'APERTURE_%s' % rse_name_generator()
    add_rse(rse1)
    add_protocol(rse1, {'scheme': 'root',
                        'hostname': 'root.aperture.com',
                        'port': 1409,
                        'prefix': '//test/chamber/',
                        'impl': 'rucio.rse.protocols.xrootd.Default',
                        'domains': {
                            'lan': {'read': 1, 'write': 1, 'delete': 1},
                            'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    rse2 = 'BLACKMESA_%s' % rse_name_generator()
    add_rse(rse2)
    add_protocol(rse2, {'scheme': 'root',
                        'hostname': 'root.blackmesa.com',
                        'port': 1409,
                        'prefix': '//lambda/complex/',
                        'impl': 'rucio.rse.protocols.xrootd.Default',
                        'domains': {
                            'lan': {'read': 1, 'write': 1, 'delete': 1},
                            'wan': {'read': 1, 'write': 1, 'delete': 1}}})

    # register archive
    archive1 = {'scope': scope, 'name': 'cube.1.zip', 'type': 'FILE',
                'bytes': 2596, 'adler32': 'beefdead'}
    archive2 = {'scope': scope, 'name': 'cube.2.zip', 'type': 'FILE',
                'bytes': 5432, 'adler32': 'deadbeef'}
    add_replicas(rse=rse1, files=[archive1], account='root')
    add_replicas(rse=rse2, files=[archive2], account='root')

    # archived files with replicas
    # BUGFIX: use range instead of the Python-2-only xrange (same behavior on both).
    archived_file = [{'scope': scope, 'name': 'zippedfile-%i-%s' % (i, str(generate_uuid())),
                      'type': 'FILE', 'bytes': 4322, 'adler32': 'beefbeef'}
                     for i in range(2)]
    self.dc.add_files_to_archive(scope=scope, name=archive1['name'], files=archived_file)
    self.dc.add_files_to_archive(scope=scope, name=archive2['name'], files=archived_file)

    # BUGFIX: dropped a dead "res = [r['pfns'] for r in ...]" assignment that
    # was immediately overwritten by the metalink query below.
    # Restricting by rse_expression must only expose the matching site.
    res = self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in archived_file],
                                metalink=True, rse_expression=rse1, resolve_archives=True)
    assert_in('APERTURE', res)
    assert_not_in('BLACKMESA', res)

    res = self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in archived_file],
                                metalink=True, rse_expression=rse2, resolve_archives=True)
    assert_in('BLACKMESA', res)
    assert_not_in('APERTURE', res)

    del_rse(rse1)
    del_rse(rse2)
def add_protocol(rse, issuer, **data):
    """
    Creates a new protocol entry for an existing RSE.

    :param rse: The RSE name.
    :param issuer: The issuer account.
    :param data: Parameters (protocol identifier, port, hostname, ...) provided by the request.
    :raises AccessDenied: if *issuer* lacks the 'add_protocol' permission.
    """
    allowed = permission.has_permission(issuer=issuer, action='add_protocol',
                                        kwargs={'rse': rse})
    if not allowed:
        raise exception.AccessDenied('Account %s can not add protocols to RSE %s' % (issuer, rse))
    rse_module.add_protocol(rse, data['data'])
def setup(self):
    """Create two GCS-backed RSEs (same host, different bucket prefixes) and
    register the same three files on both."""
    # Resolve the VO kwargs once; empty dict on a single-VO deployment.
    if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
    else:
        self.vo = {}
    self.rc = client.ReplicaClient()
    self.rse1 = rse_name_generator()
    self.rse2 = rse_name_generator()
    self.rse1_id = add_rse(self.rse1, **self.vo)
    self.rse2_id = add_rse(self.rse2, **self.vo)
    add_protocol(self.rse1_id, {'scheme': 'https',
                                'hostname': 'storage.googleapis.com',
                                'port': 443,
                                'prefix': '/atlas-europe-west1/',
                                'impl': 'rucio.rse.protocols.gfal.Default',
                                'domains': {
                                    'lan': {'read': 1, 'write': 1, 'delete': 1},
                                    'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})
    add_protocol(self.rse2_id, {'scheme': 'https',
                                'hostname': 'storage.googleapis.com',
                                'port': 443,
                                'prefix': '/atlas-europe-east1/',
                                'impl': 'rucio.rse.protocols.gfal.Default',
                                'domains': {
                                    'lan': {'read': 1, 'write': 1, 'delete': 1},
                                    'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})

    # register some files there
    self.files = [{'scope': InternalScope('mock', **self.vo), 'name': 'file-on-gcs_%s' % i,
                   'bytes': 1234, 'adler32': 'deadbeef', 'meta': {'events': 666}}
                  for i in range(0, 3)]
    root = InternalAccount('root', **self.vo)
    add_replicas(rse_id=self.rse1_id, files=self.files, account=root, ignore_availability=True)
    add_replicas(rse_id=self.rse2_id, files=self.files, account=root, ignore_availability=True)

def tearDown(self):
    """Remove the replicas and RSEs created by setup()."""
    delete_replicas(rse_id=self.rse1_id, files=self.files)
    delete_replicas(rse_id=self.rse2_id, files=self.files)
    del_rse(rse_id=self.rse1_id)
    del_rse(rse_id=self.rse2_id)
def test_reaper():
    """ REAPER2 (DAEMON): Test the reaper2 daemon."""
    # Resolve the VO kwargs once; empty dict on a single-VO deployment.
    if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
    else:
        vo = {}
    rse_name = rse_name_generator()
    rse_id = rse_core.add_rse(rse_name, **vo)

    mock_protocol = {'scheme': 'MOCK',
                     'hostname': 'localhost',
                     'port': 123,
                     'prefix': '/test/reaper',
                     'impl': 'rucio.rse.protocols.mock.Default',
                     'domains': {
                         'lan': {'read': 1, 'write': 1, 'delete': 1},
                         'wan': {'read': 1, 'write': 1, 'delete': 1}}}
    rse_core.add_protocol(rse_id=rse_id, parameter=mock_protocol)

    nb_files = 30
    file_size = 2147483648  # 2G

    # Register replicas whose tombstone already expired yesterday, making
    # them eligible for deletion.
    file_names = []
    for i in range(nb_files):
        file_name = 'lfn' + generate_uuid()
        file_names.append(file_name)
        replica_core.add_replica(rse_id=rse_id,
                                 scope=InternalScope('data13_hip', **vo),
                                 name=file_name, bytes=file_size,
                                 tombstone=datetime.utcnow() - timedelta(days=1),
                                 account=InternalAccount('root', **vo),
                                 adler32=None, md5=None)

    # Report the RSE as nearly full so MinFreeSpace forces deletion, while
    # MaxBeingDeletedFiles caps how many files each pass may remove.
    rse_core.set_rse_usage(rse_id=rse_id, source='storage', used=nb_files * file_size, free=800)
    rse_core.set_rse_limits(rse_id=rse_id, name='MinFreeSpace', value=10737418240)
    rse_core.set_rse_limits(rse_id=rse_id, name='MaxBeingDeletedFiles', value=10)

    # Run the daemon twice in 'once' mode.
    # NOTE(review): presumably the first pass only initiates deletions and
    # the second completes them, leaving nb_files - 5 replicas — confirm
    # against the reaper implementation.
    if vo:
        reaper(once=True, rses=[], include_rses='vo=%s&(%s)' % (vo['vo'], rse_name), exclude_rses=[])
        reaper(once=True, rses=[], include_rses='vo=%s&(%s)' % (vo['vo'], rse_name), exclude_rses=[])
    else:
        reaper(once=True, rses=[], include_rses=rse_name, exclude_rses=[])
        reaper(once=True, rses=[], include_rses=rse_name, exclude_rses=[])
    assert len(list(replica_core.list_replicas(
        dids=[{'scope': InternalScope('data13_hip', **vo), 'name': n} for n in file_names],
        rse_expression=rse_name))) == nb_files - 5
def setup(self):
    """Create two GCS-backed RSEs (same host, different bucket prefixes) and
    register the same three files on both."""
    self.rc = client.ReplicaClient()
    self.rse1 = rse_name_generator()
    self.rse2 = rse_name_generator()
    add_rse(self.rse1)
    add_rse(self.rse2)
    add_protocol(self.rse1, {'scheme': 'https',
                             'hostname': 'storage.googleapis.com',
                             'port': 443,
                             'prefix': '/atlas-europe-west1/',
                             'impl': 'rucio.rse.protocols.gfal.Default',
                             'domains': {
                                 'lan': {'read': 1, 'write': 1, 'delete': 1},
                                 'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})
    add_protocol(self.rse2, {'scheme': 'https',
                             'hostname': 'storage.googleapis.com',
                             'port': 443,
                             'prefix': '/atlas-europe-east1/',
                             'impl': 'rucio.rse.protocols.gfal.Default',
                             'domains': {
                                 'lan': {'read': 1, 'write': 1, 'delete': 1},
                                 'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})

    # register some files there
    self.files = [{'scope': 'mock', 'name': 'file-on-gcs_%s' % i, 'bytes': 1234,
                   'adler32': 'deadbeef', 'meta': {'events': 666}}
                  for i in range(0, 3)]
    add_replicas(rse=self.rse1, files=self.files, account='root', ignore_availability=True)
    add_replicas(rse=self.rse2, files=self.files, account='root', ignore_availability=True)

def tearDown(self):
    """Remove the replicas and RSEs created by setup()."""
    delete_replicas(rse=self.rse1, files=self.files)
    delete_replicas(rse=self.rse2, files=self.files)
    del_rse(self.rse1)
    del_rse(self.rse2)
def setUp(self):
    """Create one S3-signed RSE and one plain https RSE, each with a single
    replica, and set symmetric distances between them."""
    # Resolve the VO kwargs once; empty dict on a single-VO deployment.
    if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        self.vo = {'vo': get_vo()}
    else:
        self.vo = {}
    self.root = InternalAccount('root', **self.vo)

    # add an S3 storage with a replica
    self.rc = client.ReplicaClient()
    self.rses3 = rse_name_generator()
    self.rses3_id = add_rse(self.rses3, **self.vo)
    add_protocol(self.rses3_id, {'scheme': 'https',
                                 'hostname': 'fake-rucio.s3-eu-south-8.amazonaws.com',
                                 'port': 443,
                                 'prefix': '/',
                                 'impl': 'rucio.rse.protocols.gfal.NoRename',
                                 'domains': {
                                     'lan': {'read': 1, 'write': 1, 'delete': 1},
                                     'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})
    # 'sign_url' marks the endpoint as requiring S3 URL signing.
    add_rse_attribute(rse_id=self.rses3_id, key='sign_url', value='s3')
    add_rse_attribute(rse_id=self.rses3_id, key='fts', value='localhost')
    self.files3 = [{'scope': InternalScope('mock', **self.vo), 'name': 'file-on-aws',
                    'bytes': 1234, 'adler32': 'deadbeef', 'meta': {'events': 123}}]
    add_replicas(rse_id=self.rses3_id, files=self.files3, account=self.root)

    # add a non-S3 storage with a replica
    self.rsenons3 = rse_name_generator()
    self.rsenons3_id = add_rse(self.rsenons3, **self.vo)
    add_protocol(self.rsenons3_id, {'scheme': 'https',
                                    'hostname': 'somestorage.ch',
                                    'port': 1094,
                                    'prefix': '/my/prefix',
                                    'impl': 'rucio.rse.protocols.gfal.Default',
                                    'domains': {
                                        'lan': {'read': 1, 'write': 1, 'delete': 1},
                                        'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})
    add_rse_attribute(rse_id=self.rsenons3_id, key='fts', value='localhost')
    self.filenons3 = [{'scope': InternalScope('mock', **self.vo), 'name': 'file-on-storage',
                       'bytes': 1234, 'adler32': 'deadbeef', 'meta': {'events': 321}}]
    add_replicas(rse_id=self.rsenons3_id, files=self.filenons3, account=self.root)

    # set the distance both ways
    add_distance(self.rses3_id, self.rsenons3_id, ranking=1, agis_distance=1, geoip_distance=1)
    add_distance(self.rsenons3_id, self.rses3_id, ranking=1, agis_distance=1, geoip_distance=1)
def test_upload_file_with_impl(rse_factory, upload_client, mock_scope, file_factory):
    """ Upload (CLIENT): Ensure the module associated to the impl value is called """
    impl = 'xrootd'
    rse_name, rse_id = rse_factory.make_rse()
    # Two protocols: posix has the better (lower) priority, so only the
    # explicit 'impl' in the upload item can make xrootd win.
    add_protocol(rse_id, {'scheme': 'file',
                          'hostname': '%s.cern.ch' % rse_id,
                          'port': 0,
                          'prefix': '/test/',
                          'impl': 'rucio.rse.protocols.posix.Default',
                          'domains': {
                              'lan': {'read': 1, 'write': 1, 'delete': 1},
                              'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    add_protocol(rse_id, {'scheme': 'root',
                          'hostname': '%s.cern.ch' % rse_id,
                          'port': 0,
                          'prefix': '/test/',
                          'impl': 'rucio.rse.protocols.xrootd.Default',
                          'domains': {
                              'lan': {'read': 2, 'write': 2, 'delete': 2},
                              'wan': {'read': 2, 'write': 2, 'delete': 2}}})
    path = file_factory.file_generator()
    name = os.path.basename(path)
    item = {
        'path': path,
        'rse': rse_name,
        'did_scope': str(mock_scope),
        'did_name': name,
        'guid': generate_uuid(),
        'impl': impl
    }
    with TemporaryDirectory() as tmp_dir:
        # BUGFIX: use os.stat(path).st_size instead of
        # os.stat(path)[os.path.stat.ST_SIZE] — the latter depends on the
        # private, transitive 'stat' import inside the os.path module.
        with patch('rucio.rse.protocols.%s.Default.put' % impl, side_effect=lambda pfn, dest, dir, **kw: shutil.copy(path, tmp_dir)) as mock_put, \
                patch('rucio.rse.protocols.%s.Default.connect' % impl), \
                patch('rucio.rse.protocols.%s.Default.exists' % impl, side_effect=lambda pfn, **kw: False), \
                patch('rucio.rse.protocols.%s.Default.delete' % impl), \
                patch('rucio.rse.protocols.%s.Default.rename' % impl), \
                patch('rucio.rse.protocols.%s.Default.stat' % impl, side_effect=lambda pfn: {'filesize': os.stat(path).st_size, 'adler32': adler32(path)}), \
                patch('rucio.rse.protocols.%s.Default.close' % impl):
            mock_put.__name__ = "mock_put"
            upload_client.upload([item])
            mock_put.assert_called()
def _make_rse(self, scheme, protocol_impl):
    """Create a test RSE with one WAN-enabled protocol, record its id in
    self.created_rses and return (name, id).

    :param scheme: protocol scheme for the new RSE's single protocol.
    :param protocol_impl: dotted path of the protocol implementation class.
    """
    rse_name = rse_name_generator()
    rse_id = rse_core.add_rse(rse_name, vo=self.vo)
    protocol = {
        'scheme': scheme,
        'hostname': 'host%d' % len(self.created_rses),
        'port': 0,
        'prefix': '/test',
        'impl': protocol_impl,
        'domains': {
            'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}
        }
    }
    rse_core.add_protocol(rse_id=rse_id, parameter=protocol)
    self.created_rses.append(rse_id)
    return rse_name, rse_id
def add_protocol(rse, issuer, vo='def', session=None, **data):
    """
    Creates a new protocol entry for an existing RSE.

    :param rse: The RSE name.
    :param issuer: The issuer account.
    :param vo: The VO to act on.
    :param data: Parameters (protocol identifier, port, hostname, ...) provided by the request.
    :param session: The database session in use.
    :raises AccessDenied: if *issuer* lacks the 'add_protocol' permission.
    """
    rse_id = rse_module.get_rse_id(rse=rse, vo=vo, session=session)
    allowed = permission.has_permission(issuer=issuer, vo=vo, action='add_protocol',
                                        kwargs={'rse': rse, 'rse_id': rse_id},
                                        session=session)
    if not allowed:
        raise exception.AccessDenied('Account %s can not add protocols to RSE %s' % (issuer, rse))
    rse_module.add_protocol(rse_id, data['data'], session=session)
def test_upload_file_with_supported_protocol_from_config(rse_factory, upload_client, mock_scope, file_factory):
    """ Upload (CLIENT): Ensure the module associated to the first protocol supported by both the remote and local config read from rucio.cfg is called """
    rse_name, rse_id = rse_factory.make_rse()

    # FIXME:
    # The correct order to test should actually be ssh,xrootd,posix
    # However the preferred_impl is not working correctly.
    # Once preferred_impl is fixed, this should be changed back
    add_protocol(rse_id, {'scheme': 'scp',
                          'hostname': '%s.cern.ch' % rse_id,
                          'port': 0,
                          'prefix': '/test/',
                          'impl': 'rucio.rse.protocols.xrootd.Default',
                          'domains': {
                              'lan': {'read': 1, 'write': 1, 'delete': 1},
                              'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    add_protocol(rse_id, {'scheme': 'file',
                          'hostname': '%s.cern.ch' % rse_id,
                          'port': 0,
                          'prefix': '/test/',
                          'impl': 'rucio.rse.protocols.posix.Default',
                          'domains': {
                              'lan': {'read': 2, 'write': 2, 'delete': 2},
                              'wan': {'read': 2, 'write': 2, 'delete': 2}}})
    add_protocol(rse_id, {'scheme': 'root',
                          'hostname': '%s.cern.ch' % rse_id,
                          'port': 0,
                          'prefix': '/test/',
                          'impl': 'rucio.rse.protocols.ssh.Default',
                          'domains': {
                              'lan': {'read': 3, 'write': 3, 'delete': 3},
                              'wan': {'read': 3, 'write': 3, 'delete': 3}}})

    # rclone is not supported by the RSE, so xrootd must be the first match.
    config_add_section('upload')
    config_set('upload', 'preferred_impl', 'rclone, xrootd')

    supported_impl = 'xrootd'

    path = file_factory.file_generator()
    name = os.path.basename(path)
    item = {
        'path': path,
        'rse': rse_name,
        'did_scope': str(mock_scope),
        'did_name': name,
        'guid': generate_uuid()
    }

    with TemporaryDirectory() as tmp_dir:
        # BUGFIX: use os.stat(path).st_size instead of
        # os.stat(path)[os.path.stat.ST_SIZE] — the latter depends on the
        # private, transitive 'stat' import inside the os.path module.
        with patch('rucio.rse.protocols.%s.Default.put' % supported_impl, side_effect=lambda pfn, dest, dir, **kw: shutil.copy(path, tmp_dir)) as mock_put, \
                patch('rucio.rse.protocols.%s.Default.connect' % supported_impl), \
                patch('rucio.rse.protocols.%s.Default.exists' % supported_impl, side_effect=lambda pfn, **kw: False), \
                patch('rucio.rse.protocols.%s.Default.delete' % supported_impl), \
                patch('rucio.rse.protocols.%s.Default.rename' % supported_impl), \
                patch('rucio.rse.protocols.%s.Default.stat' % supported_impl, side_effect=lambda pfn: {'filesize': os.stat(path).st_size, 'adler32': adler32(path)}), \
                patch('rucio.rse.protocols.%s.Default.close' % supported_impl):
            mock_put.__name__ = "mock_put"
            upload_client.upload([item])
            mock_put.assert_called()
def test_multiple_protocols_same_scheme(rse_factory, upload_client, mock_scope, file_factory):
    """ Upload (CLIENT): Ensure domain correctly selected when multiple protocols exist with the same scheme """
    rse, rse_id = rse_factory.make_rse()

    # Ensure client site and rse site are identical. So that "lan" is preferred.
    add_rse_attribute(rse_id, 'site', 'ROAMING')

    # The 'file' scheme is served by two protocols: one usable only over WAN,
    # the other only over LAN; a third, unrelated 'root' protocol is added too.
    add_protocol(rse_id, {'scheme': 'file',
                          'hostname': 'file-wan.aperture.com',
                          'port': 0,
                          'prefix': '/prefix1/',
                          'impl': 'rucio.rse.protocols.posix.Default',
                          'domains': {
                              'lan': {'read': 0, 'write': 0, 'delete': 0},
                              'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    add_protocol(rse_id, {'scheme': 'file',
                          'hostname': 'file-lan.aperture.com',
                          'port': 0,
                          'prefix': '/prefix2/',
                          'impl': 'rucio.rse.protocols.posix.Default',
                          'domains': {
                              'lan': {'read': 1, 'write': 1, 'delete': 1},
                              'wan': {'read': 0, 'write': 0, 'delete': 0}}})
    add_protocol(rse_id, {'scheme': 'root',
                          'hostname': 'root.aperture.com',
                          'port': 1403,
                          'prefix': '/prefix3/',
                          'impl': 'rucio.rse.protocols.xrootd.Default',
                          'domains': {
                              'lan': {'read': 2, 'write': 2, 'delete': 2},
                              'wan': {'read': 2, 'write': 2, 'delete': 2}}})

    # Upload a file
    local_path = file_factory.file_generator()
    did_name = os.path.basename(local_path)
    upload_item = {
        'path': local_path,
        'rse': rse,
        'did_scope': str(mock_scope),
        'did_name': did_name,
        'guid': generate_uuid(),
    }
    summary_path = file_factory.base_dir / 'summary'
    upload_client.upload([upload_item], summary_file_path=summary_path)

    # Verify that the lan protocol was used for the upload
    with open(summary_path) as json_file:
        summary = json.load(json_file)
    pfn = summary['{}:{}'.format(mock_scope, did_name)]['pfn']
    assert 'file-lan.aperture.com' in pfn
def test_upload_file_with_supported_protocol(rse_factory, upload_client, mock_scope, file_factory):
    """ Upload (CLIENT): Ensure the module associated to the first protocol supported by both the remote and local config is called """
    rse_name, rse_id = rse_factory.make_rse()

    # FIXME:
    # The correct order to test should actually be ssh,xrootd,posix
    # However the preferred_impl is not working correctly.
    # Once preferred_impl is fixed, this should be changed back
    host = '%s.cern.ch' % rse_id
    protocol_order = [('scp', 'rucio.rse.protocols.posix.Default'),
                      ('root', 'rucio.rse.protocols.xrootd.Default'),
                      ('file', 'rucio.rse.protocols.ssh.Default')]
    for rank, (scheme, impl) in enumerate(protocol_order, start=1):
        add_protocol(rse_id, {'scheme': scheme,
                              'hostname': host,
                              'port': 0,
                              'prefix': '/test/',
                              'impl': impl,
                              'domains': {
                                  'lan': {'read': rank, 'write': rank, 'delete': rank},
                                  'wan': {'read': rank, 'write': rank, 'delete': rank}}})

    local_path = file_factory.file_generator()
    did_name = os.path.basename(local_path)
    upload_item = {
        'path': local_path,
        'rse': rse_name,
        'did_scope': str(mock_scope),
        'did_name': did_name,
        'guid': generate_uuid()
    }
    status = upload_client.upload([upload_item])
    assert status == 0
for rse in [self.rse_with_proxy, self.rse_without_proxy]: add_replicas(rse=rse, files=self.files, account='root', ignore_availability=True) add_protocol( self.rse_without_proxy, { 'scheme': 'root', 'hostname': 'root.blackmesa.com', 'port': 1409, 'prefix': '//training/facility/', 'impl': 'rucio.rse.protocols.xrootd.Default', 'domains': { 'lan': { 'read': 1, 'write': 1, 'delete': 1 }, 'wan': { 'read': 1, 'write': 1, 'delete': 1 } } }) add_protocol( self.rse_with_proxy, { 'scheme': 'root', 'hostname': 'root.aperture.com', 'port': 1409,
def protocols_setup(vo):
    """Fixture: create two site-tagged RSEs, register one shared file on each,
    attach three protocols per RSE with differing lan/wan priorities, then
    yield {'files', 'rse_info'} and tear everything down afterwards."""
    rse_info = copy.deepcopy(base_rse_info)

    files = [{'scope': InternalScope('mock', vo=vo), 'name': 'element_0',
              'bytes': 1234, 'adler32': 'deadbeef'}]
    root = InternalAccount('root', vo=vo)

    for idx in range(len(rse_info)):
        rse_info[idx]['name'] = '%s_%s' % (rse_info[idx]['site'], rse_name_generator())
        rse_info[idx]['id'] = add_rse(rse_info[idx]['name'], vo=vo)
        add_rse_attribute(rse_id=rse_info[idx]['id'], key='site', value=base_rse_info[idx]['site'])
        add_replicas(rse_id=rse_info[idx]['id'], files=files, account=root)

    # invalidate cache for parse_expression('site=...')
    rse_expression_parser.REGION.invalidate()

    # check sites
    for idx in range(len(rse_info)):
        site_rses = rse_expression_parser.parse_expression('site=' + base_rse_info[idx]['site'])
        assert len(site_rses) > 0
        assert rse_info[idx]['id'] in [rse['id'] for rse in site_rses]

    # First RSE: root best on both domains, davs second, gsiftp wan-only.
    add_protocol(rse_info[0]['id'], {'scheme': schemes[0],
                                     'hostname': ('root.%s' % base_rse_info[0]['address']),
                                     'port': 1409,
                                     'prefix': '//test/chamber/',
                                     'impl': 'rucio.rse.protocols.xrootd.Default',
                                     'domains': {
                                         'lan': {'read': 1, 'write': 1, 'delete': 1},
                                         'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    add_protocol(rse_info[0]['id'], {'scheme': schemes[2],
                                     'hostname': ('davs.%s' % base_rse_info[0]['address']),
                                     'port': 443,
                                     'prefix': '/test/chamber/',
                                     'impl': 'rucio.rse.protocols.gfal.Default',
                                     'domains': {
                                         'lan': {'read': 2, 'write': 2, 'delete': 2},
                                         'wan': {'read': 2, 'write': 2, 'delete': 2}}})
    add_protocol(rse_info[0]['id'], {'scheme': schemes[1],
                                     'hostname': ('gsiftp.%s' % base_rse_info[0]['address']),
                                     'port': 8446,
                                     'prefix': '/test/chamber/',
                                     'impl': 'rucio.rse.protocols.gfal.Default',
                                     'domains': {
                                         'lan': {'read': 0, 'write': 0, 'delete': 0},
                                         'wan': {'read': 3, 'write': 3, 'delete': 3}}})

    # Second RSE: gsiftp best on wan, davs wan-only, root best on lan.
    add_protocol(rse_info[1]['id'], {'scheme': schemes[1],
                                     'hostname': ('gsiftp.%s' % base_rse_info[1]['address']),
                                     'port': 8446,
                                     'prefix': '/lambda/complex/',
                                     'impl': 'rucio.rse.protocols.gfal.Default',
                                     'domains': {
                                         'lan': {'read': 2, 'write': 2, 'delete': 2},
                                         'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    add_protocol(rse_info[1]['id'], {'scheme': schemes[2],
                                     'hostname': ('davs.%s' % base_rse_info[1]['address']),
                                     'port': 443,
                                     'prefix': '/lambda/complex/',
                                     'impl': 'rucio.rse.protocols.gfal.Default',
                                     'domains': {
                                         'lan': {'read': 0, 'write': 0, 'delete': 0},
                                         'wan': {'read': 2, 'write': 2, 'delete': 2}}})
    add_protocol(rse_info[1]['id'], {'scheme': schemes[0],
                                     'hostname': ('root.%s' % base_rse_info[1]['address']),
                                     'port': 1409,
                                     'prefix': '//lambda/complex/',
                                     'impl': 'rucio.rse.protocols.xrootd.Default',
                                     'domains': {
                                         'lan': {'read': 1, 'write': 1, 'delete': 1},
                                         'wan': {'read': 3, 'write': 3, 'delete': 3}}})

    yield {'files': files, 'rse_info': rse_info}

    # Teardown: remove replicas, site attributes and the RSEs themselves.
    for info in rse_info:
        delete_replicas(rse_id=info['id'], files=files)
        del_rse_attribute(rse_id=info['id'], key='site')
        del_rse(info['id'])
def setup(self):
    """Build the fixtures for the importer tests: one new RSE name, four
    pre-existing RSEs (the first fully configured with protocols, limits,
    attributes), one pre-existing distance, and three import payloads
    (self.data1/2/3) of decreasing completeness."""
    # Resolve VO headers/kwargs once; empty on a single-VO deployment.
    if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        self.vo_header = {'X-Rucio-VO': 'tst'}
        self.vo = {'vo': 'tst'}
    else:
        self.vo_header = {}
        self.vo = {}

    # New RSE
    self.new_rse = rse_name_generator()

    # RSE 1 that already exists
    self.old_rse_1 = rse_name_generator()
    self.old_rse_id_1 = add_rse(self.old_rse_1, availability=1, region_code='DE',
                                country_name='DE', deterministic=True, volatile=True,
                                staging_area=True, time_zone='Europe',
                                latitude='1', longitude='2', **self.vo)
    add_protocol(self.old_rse_id_1, {'scheme': 'scheme1',
                                     'hostname': 'hostname1',
                                     'port': 1000,
                                     'impl': 'TODO'})
    add_protocol(self.old_rse_id_1, {'scheme': 'scheme3',
                                     'hostname': 'hostname3',
                                     'port': 1000,
                                     'impl': 'TODO'})

    set_rse_limits(rse_id=self.old_rse_id_1, name='MaxBeingDeletedFiles', value='10')
    set_rse_limits(rse_id=self.old_rse_id_1, name='MinFreeSpace', value='10')
    add_rse_attribute(rse_id=self.old_rse_id_1, key='attr1', value='test10')
    add_rse_attribute(rse_id=self.old_rse_id_1, key='lfn2pfn_algorithm', value='test10')
    add_rse_attribute(rse_id=self.old_rse_id_1, key='verify_checksum', value=True)

    # RSE 2 that already exists
    self.old_rse_2 = rse_name_generator()
    self.old_rse_id_2 = add_rse(self.old_rse_2, **self.vo)

    # RSE 3 that already exists
    self.old_rse_3 = rse_name_generator()
    self.old_rse_id_3 = add_rse(self.old_rse_3, **self.vo)

    # RSE 4 that already exists
    self.old_rse_4 = rse_name_generator()
    self.old_rse_id_4 = add_rse(self.old_rse_4, **self.vo)

    # Distance that already exists
    add_distance(self.old_rse_id_1, self.old_rse_id_2)

    # Full payload: creates self.new_rse, updates old_rse_1's settings,
    # protocols, limits and attributes, and declares two distances.
    self.data1 = {
        'rses': {
            self.new_rse: {
                'rse_type': RSEType.TAPE,
                'availability': 3,
                'city': 'NewCity',
                'region_code': 'CH',
                'country_name': 'switzerland',
                'staging_area': False,
                'time_zone': 'Europe',
                'latitude': 1,
                'longitude': 2,
                'deterministic': True,
                'volatile': False,
                'protocols': [{
                    'scheme': 'scheme',
                    'hostname': 'hostname',
                    'port': 1000,
                    'impl': 'impl'
                }],
                'attributes': {
                    'attr1': 'test'
                },
                'MinFreeSpace': 20000,
                'lfn2pfn_algorithm': 'hash2',
                'verify_checksum': False,
                'availability_delete': True,
                'availability_read': False,
                'availability_write': True
            },
            self.old_rse_1: {
                'rse_type': RSEType.TAPE,
                'deterministic': False,
                'volatile': False,
                'region_code': 'US',
                'country_name': 'US',
                'staging_area': False,
                'time_zone': 'Asia',
                'longitude': 5,
                'city': 'City',
                'availability': 2,
                'latitude': 10,
                'protocols': [{
                    'scheme': 'scheme1',
                    'hostname': 'hostname1',
                    'port': 1000,
                    'prefix': 'prefix',
                    'impl': 'impl1'
                }, {
                    'scheme': 'scheme2',
                    'hostname': 'hostname2',
                    'port': 1001,
                    'impl': 'impl'
                }],
                'attributes': {
                    'attr1': 'test1',
                    'attr2': 'test2'
                },
                'MinFreeSpace': 10000,
                'MaxBeingDeletedFiles': 1000,
                'verify_checksum': False,
                'lfn2pfn_algorithm': 'hash3',
                'availability_delete': False,
                'availability_read': False,
                'availability_write': True
            },
            self.old_rse_2: {},
            self.old_rse_3: {}
        },
        'distances': {
            self.old_rse_1: {
                self.old_rse_2: {
                    'src_rse': self.old_rse_1,
                    'dest_rse': self.old_rse_2,
                    'ranking': 10
                },
                self.old_rse_3: {
                    'src_rse': self.old_rse_1,
                    'dest_rse': self.old_rse_3,
                    'ranking': 4
                }
            }
        }
    }

    # Minimal payload: a single RSE entry with no settings.
    self.data2 = {'rses': {self.new_rse: {'rse': self.new_rse}}}
    # Payload with an empty distances section only.
    self.data3 = {'distances': {}}
def import_data(data, session=None):
    """
    Import data to add and update records in Rucio.

    :param data: data to be imported as dictionary.
    :param session: database session in use.
    """
    # RSEs
    rses = data.get('rses')
    if rses:
        # NOTE(review): each element of `rses` is treated as a dict carrying an
        # 'rse' name plus optional 'protocols'/'limits'/'transfer_limits'/
        # 'attributes' sections -- confirm against the callers' payload schema.
        for rse in rses:
            protocols = rse.get('protocols')
            if protocols:
                protocols = protocols.get('protocols')
                del rse['protocols']
            rse_name = rse['rse']
            del rse['rse']
            if not rse_module.rse_exists(rse_name, session=session):
                rse_module.add_rse(rse_name, deterministic=rse.get('deterministic'), volatile=rse.get('volatile'),
                                   city=rse.get('city'), region_code=rse.get('region_code'),
                                   country_name=rse.get('country_name'), staging_area=rse.get('staging_area'),
                                   continent=rse.get('continent'), time_zone=rse.get('time_zone'),
                                   ISP=rse.get('ISP'), rse_type=rse.get('rse_type'),
                                   latitude=rse.get('latitude'), longitude=rse.get('longitude'),
                                   ASN=rse.get('ASN'), availability=rse.get('availability'),
                                   session=session)
            else:
                rse_module.update_rse(rse_name, rse, session=session)

            # Protocols: update in place when (scheme, hostname, port) already
            # exists on the RSE, otherwise add a new protocol.
            if protocols:
                old_protocols = rse_module.get_rse_protocols(rse=rse_name, session=session)
                for protocol in protocols:
                    scheme = protocol.get('scheme')
                    hostname = protocol.get('hostname')
                    port = protocol.get('port')
                    intersection = [old_protocol for old_protocol in old_protocols['protocols']
                                    if old_protocol['scheme'] == scheme
                                    and old_protocol['hostname'] == hostname
                                    and old_protocol['port'] == port]
                    if intersection:
                        # The identifying triple is passed separately and must
                        # not remain in the update payload.
                        del protocol['scheme']
                        del protocol['hostname']
                        del protocol['port']
                        rse_module.update_protocols(rse=rse_name, scheme=scheme, data=protocol,
                                                    hostname=hostname, port=port, session=session)
                    else:
                        rse_module.add_protocol(rse=rse_name, parameter=protocol, session=session)

            # Limits: delete-then-set to replace existing values.
            limits = rse.get('limits')
            if limits:
                old_limits = rse_module.get_rse_limits(rse=rse_name, session=session)
                for limit in limits:
                    if limit in old_limits:
                        rse_module.delete_rse_limit(rse=rse_name, name=limit, session=session)
                    rse_module.set_rse_limits(rse=rse_name, name=limit, value=limits[limit],
                                              session=session)

            # Transfer limits: delete-then-set per activity.
            transfer_limits = rse.get('transfer_limits')
            if transfer_limits:
                for limit in transfer_limits:
                    old_transfer_limits = rse_module.get_rse_transfer_limits(
                        rse=rse_name, activity=limit, session=session)
                    if limit in old_transfer_limits:
                        rse_module.delete_rse_transfer_limits(rse=rse_name, activity=limit, session=session)
                    # BUG FIX: dict views are not subscriptable on Python 3 --
                    # `.items()[0][1]` raised TypeError; materialise the values first.
                    max_transfers = list(transfer_limits[limit].values())[0]['max_transfers']
                    rse_module.set_rse_transfer_limits(
                        rse=rse_name, activity=limit, max_transfers=max_transfers, session=session)

            # Attributes: delete-then-add to replace existing values.
            attributes = rse.get('attributes')
            if attributes:
                old_attributes = rse_module.list_rse_attributes(rse=rse_name, session=session)
                for attr in attributes:
                    if attr in old_attributes:
                        rse_module.del_rse_attribute(rse=rse_name, key=attr, session=session)
                    rse_module.add_rse_attribute(rse=rse_name, key=attr, value=attributes[attr],
                                                 session=session)

    # Distances
    distances = data.get('distances')
    if distances:
        for src_rse_name in distances:
            src = rse_module.get_rse_id(src_rse_name, session=session)
            for dest_rse_name in distances[src_rse_name]:
                dest = rse_module.get_rse_id(dest_rse_name, session=session)
                distance = distances[src_rse_name][dest_rse_name]
                # The ids are resolved from the names above; drop them from the
                # payload if present (`del` raised KeyError for payloads that,
                # like the test fixtures, only carry src_rse/dest_rse/ranking).
                distance.pop('src_rse_id', None)
                distance.pop('dest_rse_id', None)
                old_distance = distance_module.get_distances(src_rse_id=src, dest_rse_id=dest,
                                                             session=session)
                if old_distance:
                    distance_module.update_distances(src_rse_id=src, dest_rse_id=dest,
                                                     parameters=distance, session=session)
                else:
                    distance_module.add_distance(src_rse_id=src, dest_rse_id=dest,
                                                 ranking=distance.get('ranking'),
                                                 agis_distance=distance.get('agis_distance'),
                                                 geoip_distance=distance.get('geoip_distance'),
                                                 active=distance.get('active'),
                                                 submitted=distance.get('submitted'),
                                                 transfer_speed=distance.get('transfer_speed'),
                                                 finished=distance.get('finished'),
                                                 failed=distance.get('failed'),
                                                 session=session)
def test_reaper():
    """ REAPER2 (DAEMON): Test the reaper2 daemon."""
    if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
        new_vo = {'vo': 'new'}
        if not vo_core.vo_exists(**new_vo):
            vo_core.add_vo(description='Test', email='*****@*****.**', **new_vo)
        if not scope_core.check_scope(InternalScope('data13_hip', **new_vo)):
            scope_core.add_scope(InternalScope('data13_hip', **new_vo),
                                 InternalAccount('root', **new_vo))
        nb_rses = 2
    else:
        vo = {}
        new_vo = {}
        nb_rses = 1

    mock_protocol = {'scheme': 'MOCK',
                     'hostname': 'localhost',
                     'port': 123,
                     'prefix': '/test/reaper',
                     'impl': 'rucio.rse.protocols.mock.Default',
                     'domains': {
                         'lan': {'read': 1, 'write': 1, 'delete': 1},
                         'wan': {'read': 1, 'write': 1, 'delete': 1}}}

    nb_files = 30
    file_size = 2147483648  # 2G

    rse_names = []
    all_file_names = []
    for j in range(nb_rses):
        rse_name = rse_name_generator()
        rse_names.append(rse_name)
        rse_id = rse_core.add_rse(rse_name, **vo)
        rse_core.add_protocol(rse_id=rse_id, parameter=mock_protocol)
        if new_vo:
            # Same RSE name registered on the second VO to exercise VO isolation.
            rse_id_new = rse_core.add_rse(rse_name, **new_vo)
            rse_core.add_protocol(rse_id=rse_id_new, parameter=mock_protocol)

        file_names = []
        for i in range(nb_files):
            file_name = 'lfn' + generate_uuid()
            file_names.append(file_name)
            # Tombstone in the past so the replicas are immediately reapable.
            replica_core.add_replica(rse_id=rse_id, scope=InternalScope('data13_hip', **vo),
                                     name=file_name, bytes=file_size,
                                     tombstone=datetime.utcnow() - timedelta(days=1),
                                     account=InternalAccount('root', **vo),
                                     adler32=None, md5=None)
            if new_vo:
                replica_core.add_replica(rse_id=rse_id_new, scope=InternalScope('data13_hip', **new_vo),
                                         name=file_name, bytes=file_size,
                                         tombstone=datetime.utcnow() - timedelta(days=1),
                                         account=InternalAccount('root', **new_vo),
                                         adler32=None, md5=None)
        all_file_names.append(file_names)
        # Pretend the RSE is nearly full so MinFreeSpace triggers deletion.
        rse_core.set_rse_usage(rse_id=rse_id, source='storage', used=nb_files * file_size, free=800)
        rse_core.set_rse_limits(rse_id=rse_id, name='MinFreeSpace', value=10737418240)
        rse_core.set_rse_limits(rse_id=rse_id, name='MaxBeingDeletedFiles', value=10)
        if new_vo:
            rse_core.set_rse_usage(rse_id=rse_id_new, source='storage', used=nb_files * file_size, free=800)
            rse_core.set_rse_limits(rse_id=rse_id_new, name='MinFreeSpace', value=10737418240)
            rse_core.set_rse_limits(rse_id=rse_id_new, name='MaxBeingDeletedFiles', value=10)

    if not vo:
        reaper(once=True, rses=[], include_rses=rse_names[0], exclude_rses=[])
        reaper(once=True, rses=[], include_rses=rse_names[0], exclude_rses=[])
        # Consistency fix: use rse_names[0] (identical to the loop-leaked
        # rse_name when nb_rses == 1, and matching the multi-VO branch).
        assert len(list(replica_core.list_replicas(
            dids=[{'scope': InternalScope('data13_hip', **vo), 'name': n} for n in all_file_names[0]],
            rse_expression=rse_names[0]))) == nb_files - 5
    else:
        # Check we reap all VOs by default
        reaper(once=True, rses=[], include_rses=rse_names[0], exclude_rses=[])
        reaper(once=True, rses=[], include_rses=rse_names[0], exclude_rses=[])
        assert len(list(replica_core.list_replicas(
            dids=[{'scope': InternalScope('data13_hip', **vo), 'name': n} for n in all_file_names[0]],
            rse_expression=rse_names[0]))) == nb_files - 5
        assert len(list(replica_core.list_replicas(
            dids=[{'scope': InternalScope('data13_hip', **new_vo), 'name': n} for n in all_file_names[0]],
            rse_expression=rse_names[0]))) == nb_files - 5

        # Check we don't affect a second VO that isn't specified
        reaper(once=True, rses=[], include_rses=rse_names[1], exclude_rses=[], vos=['new'])
        reaper(once=True, rses=[], include_rses=rse_names[1], exclude_rses=[], vos=['new'])
        # BUG FIX: these previously read ``assert <expr>, nb_files`` -- the
        # trailing comma made the expected count an assertion *message*, so the
        # checks always passed. Compare the counts explicitly.
        assert len(list(replica_core.list_replicas(
            dids=[{'scope': InternalScope('data13_hip', **vo), 'name': n} for n in all_file_names[1]],
            rse_expression=rse_names[1]))) == nb_files
        assert len(list(replica_core.list_replicas(
            dids=[{'scope': InternalScope('data13_hip', **new_vo), 'name': n} for n in all_file_names[1]],
            rse_expression=rse_names[1]))) == nb_files - 5
def test_download_file_with_impl(rse_factory, did_factory, download_client, mock_scope):
    """ Download (CLIENT): Ensure the module associated to the impl value is called """
    impl = 'xrootd'
    rse, rse_id = rse_factory.make_rse()

    # Register two protocols on the RSE; the explicit ``impl`` passed in the
    # download item must select the xrootd module regardless of priorities.
    for scheme, impl_path, rank in (('file', 'rucio.rse.protocols.posix.Default', 1),
                                    ('root', 'rucio.rse.protocols.xrootd.Default', 2)):
        ops = {'read': rank, 'write': rank, 'delete': rank}
        add_protocol(rse_id, {'scheme': scheme,
                              'hostname': '%s.cern.ch' % rse_id,
                              'port': 0,
                              'prefix': '/test/',
                              'impl': impl_path,
                              'domains': {'lan': dict(ops), 'wan': dict(ops)}})

    # Upload a fresh file so there is something to download.
    local_path = file_generator()
    file_name = os.path.basename(local_path)
    did_factory.upload_client.upload([{
        'path': local_path,
        'rse': rse,
        'did_scope': str(mock_scope),
        'did_name': file_name,
        'guid': generate_uuid(),
    }])
    did_str = '%s:%s' % (mock_scope, file_name)

    # Stub out the xrootd protocol; 'get' just copies the source file over.
    fake_get = lambda pfn, dest, **kw: shutil.copy(local_path, dest)
    with patch('rucio.rse.protocols.%s.Default.get' % impl, side_effect=fake_get) as mock_get, \
            patch('rucio.rse.protocols.%s.Default.connect' % impl), \
            patch('rucio.rse.protocols.%s.Default.close' % impl):
        download_client.download_dids([{'did': did_str, 'impl': impl}])
    mock_get.assert_called()
def root_proxy_example_data(vo):
    """Fixture data for root-proxy tests: two RSEs on different sites (one site
    has an internal proxy configured), identical replicas on both, and one
    xrootd protocol per RSE. Yields the names, then cleans everything up."""
    no_proxy_rse = rse_name_generator()
    no_proxy_rse_id = add_rse(no_proxy_rse, vo=vo)
    add_rse_attribute(rse_id=no_proxy_rse_id, key='site', value='BLACKMESA1')

    proxied_rse = rse_name_generator()
    proxied_rse_id = add_rse(proxied_rse, vo=vo)
    add_rse_attribute(rse_id=proxied_rse_id, key='site', value='APERTURE1')

    # APERTURE1 site has an internal proxy
    config_set('root-proxy-internal', 'APERTURE1', 'proxy.aperture.com:1094')

    files = [{'scope': InternalScope('mock', vo=vo),
              'name': 'half-life_%s' % i,
              'bytes': 1234,
              'adler32': 'deadbeef',
              'meta': {'events': 666}} for i in range(1, 4)]
    for rse_id in (proxied_rse_id, no_proxy_rse_id):
        add_replicas(rse_id=rse_id, files=files,
                     account=InternalAccount('root', vo=vo),
                     ignore_availability=True)

    for rse_id, host, prefix in ((no_proxy_rse_id, 'root.blackmesa.com', '//training/facility/'),
                                 (proxied_rse_id, 'root.aperture.com', '//test/chamber/')):
        add_protocol(rse_id, {'scheme': 'root',
                              'hostname': host,
                              'port': 1409,
                              'prefix': prefix,
                              'impl': 'rucio.rse.protocols.xrootd.Default',
                              'domains': {'lan': {'read': 1, 'write': 1, 'delete': 1},
                                          'wan': {'read': 1, 'write': 1, 'delete': 1}}})

    yield {'files': files,
           'rse_without_proxy': no_proxy_rse,
           'rse_with_proxy': proxied_rse}

    # Teardown: drop the replicas and both RSEs.
    for rse_id in (proxied_rse_id, no_proxy_rse_id):
        delete_replicas(rse_id=rse_id, files=files)
    del_rse(proxied_rse_id)
    del_rse(no_proxy_rse_id)
def test_download_file_with_supported_protocol_from_config(
        rse_factory, did_factory, download_client, mock_scope):
    """ Download (CLIENT): Ensure the module associated to the first protocol supported by both the remote and local config read from rucio.cfg is called """
    rse, rse_id = rse_factory.make_rse()

    # FIXME: the intended ordering is scp,file,root, but preferred_impl is not
    # working correctly; restore that order once preferred_impl is fixed.
    protocol_table = (('scp', 'rucio.rse.protocols.posix.Default', 1),
                      ('file', 'rucio.rse.protocols.scp.Default', 2),
                      ('root', 'rucio.rse.protocols.xrootd.Default', 3))
    for scheme, impl_path, rank in protocol_table:
        ops = {'read': rank, 'write': rank, 'delete': rank}
        add_protocol(rse_id, {'scheme': scheme,
                              'hostname': '%s.cern.ch' % rse_id,
                              'port': 0,
                              'prefix': '/test/',
                              'impl': impl_path,
                              'domains': {'lan': dict(ops), 'wan': dict(ops)}})

    # Local preference list: rclone is unavailable, so xrootd must be picked.
    config_add_section('download')
    config_set('download', 'preferred_impl', 'rclone, xrootd')
    supported_impl = 'xrootd'

    local_path = file_generator()
    file_name = os.path.basename(local_path)
    did_factory.upload_client.upload([{
        'path': local_path,
        'rse': rse,
        'did_scope': str(mock_scope),
        'did_name': file_name,
        'guid': generate_uuid(),
    }])
    did_str = '%s:%s' % (mock_scope, file_name)

    with patch('rucio.rse.protocols.%s.Default.get' % supported_impl,
               side_effect=lambda pfn, dest, **kw: shutil.copy(local_path, dest)) as mock_get, \
            patch('rucio.rse.protocols.%s.Default.connect' % supported_impl), \
            patch('rucio.rse.protocols.%s.Default.close' % supported_impl):
        download_client.download_dids([{'did': did_str, 'impl': supported_impl}])
    mock_get.assert_called()
def _register_mock_rse(site, endpoint, scheme, impl, token):
    """Create RSE *site* and attach a protocol derived from *endpoint*'s URL
    (hostname/port/prefix parsed from the colon-separated endpoint string)."""
    tmp_proto = {
        'impl': impl,
        'scheme': scheme,
        'domains': {
            'lan': {'read': 1, 'write': 1, 'delete': 1},
            'wan': {'read': 1, 'write': 1, 'delete': 1}}}
    rse.add_rse(site)
    tmp_proto['hostname'] = endpoint.split(':')[1][2:]
    tmp_proto['port'] = endpoint.split(':')[2].split('/')[0]
    tmp_proto['prefix'] = '/'.join([''] + endpoint.split(':')[2].split('/')[1:])
    if scheme == 'srm':
        tmp_proto['extended_attributes'] = {'space_token': token,
                                            'web_service_path': ''}
    rse.add_protocol(site, tmp_proto)


def request_transfer(once=False, src=None, dst=None):
    """
    Main loop to request a new transfer.

    Creates two throw-away RSEs from the src/dst endpoints, then repeatedly
    uploads a test file, registers it in a dataset and adds a replication rule
    towards the destination RSE until a graceful stop is requested.

    :param once: run a single iteration and return.
    :param src: source endpoint URL ('https://...' or srm-style 'token:...').
    :param dst: destination endpoint URL (same format as src).
    """
    logging.info('request: starting')

    site_a = 'RSE%s' % generate_uuid().upper()
    site_b = 'RSE%s' % generate_uuid().upper()

    scheme = 'https'
    impl = 'rucio.rse.protocols.webdav.Default'
    srctoken = dsttoken = None
    if not src.startswith('https://'):
        scheme = 'srm'
        impl = 'rucio.rse.protocols.srm.Default'
        srctoken = src.split(':')[0]
        dsttoken = dst.split(':')[0]

    _register_mock_rse(site_a, src, scheme, impl, srctoken)
    _register_mock_rse(site_b, dst, scheme, impl, dsttoken)

    si = rsemanager.get_rse_info(site_a)

    session = get_session()

    logging.info('request: started')

    while not graceful_stop.is_set():
        try:
            tmp_name = generate_uuid()

            # add a new dataset
            did.add_did(scope='mock', name='dataset-%s' % tmp_name,
                        type=DIDType.DATASET, account='root', session=session)

            # construct PFN
            pfn = rsemanager.lfns2pfns(si, lfns=[{'scope': 'mock', 'name': 'file-%s' % tmp_name}])['mock:file-%s' % tmp_name]

            # create the directories if needed
            p = rsemanager.create_protocol(si, operation='write', scheme=scheme)
            p.connect()
            try:
                p.mkdir(pfn)
            except Exception:
                # best effort: the directory may already exist
                # (was a bare ``except:`` which also swallowed KeyboardInterrupt)
                pass

            # upload the test file
            try:
                fp = os.path.dirname(config_get('injector', 'file'))
                fn = os.path.basename(config_get('injector', 'file'))
                p.put(fn, pfn, source_dir=fp)
            except Exception:
                logging.critical('Could not upload, removing temporary DID: %s' % str(sys.exc_info()))
                did.delete_dids([{'scope': 'mock', 'name': 'dataset-%s' % tmp_name}],
                                account='root', session=session)
                break

            # add the replica
            replica.add_replica(rse=site_a, scope='mock', name='file-%s' % tmp_name,
                                bytes=config_get_int('injector', 'bytes'),
                                adler32=config_get('injector', 'adler32'),
                                md5=config_get('injector', 'md5'),
                                account='root', session=session)

            # to the dataset
            # NOTE(review): 'bytes' is read with config_get here but
            # config_get_int above -- presumably both should be ints; confirm.
            did.attach_dids(scope='mock', name='dataset-%s' % tmp_name,
                            dids=[{'scope': 'mock',
                                   'name': 'file-%s' % tmp_name,
                                   'bytes': config_get('injector', 'bytes')}],
                            account='root', session=session)

            # add rule for the dataset
            ts = time.time()
            rule.add_rule(dids=[{'scope': 'mock', 'name': 'dataset-%s' % tmp_name}],
                          account='root', copies=1, rse_expression=site_b,
                          grouping='ALL', weight=None, lifetime=None, locked=False,
                          subscription_id=None, activity='mock-injector', session=session)
            logging.info('added rule for %s for DID mock:%s' % (site_b, tmp_name))
            record_timer('daemons.mock.conveyorinjector.add_rule', (time.time()-ts)*1000)

            record_counter('daemons.mock.conveyorinjector.request_transfer')

            session.commit()
        except Exception:
            # was a bare ``except:``: roll back and keep the loop alive, but
            # let KeyboardInterrupt/SystemExit propagate.
            session.rollback()
            logging.critical(traceback.format_exc())

        if once:
            return

    logging.info('request: graceful stop requested')
    logging.info('request: graceful stop done')
def test_replica_sorting(self):
    """ REPLICA (CORE): Test the correct sorting of the replicas across WAN and LAN """
    self.rc = ReplicaClient()

    # Two RSEs on different sites, both holding the same single file.
    self.rse1 = 'APERTURE_%s' % rse_name_generator()
    self.rse2 = 'BLACKMESA_%s' % rse_name_generator()
    self.rse1_id = add_rse(self.rse1, **self.vo)
    self.rse2_id = add_rse(self.rse2, **self.vo)
    add_rse_attribute(rse_id=self.rse1_id, key='site', value='APERTURE')
    add_rse_attribute(rse_id=self.rse2_id, key='site', value='BLACKMESA')

    self.files = [{'scope': InternalScope('mock', **self.vo),
                   'name': 'element_0',
                   'bytes': 1234,
                   'adler32': 'deadbeef'}]
    root = InternalAccount('root', **self.vo)
    add_replicas(rse_id=self.rse1_id, files=self.files, account=root)
    add_replicas(rse_id=self.rse2_id, files=self.files, account=root)

    # RSE1 protocols: root (lan 1 / wan 1), davs (lan 2 / wan 2),
    # gsiftp (lan disabled via 0 / wan 3).
    add_protocol(self.rse1_id, {'scheme': 'root',
                                'hostname': 'root.aperture.com',
                                'port': 1409,
                                'prefix': '//test/chamber/',
                                'impl': 'rucio.rse.protocols.xrootd.Default',
                                'domains': {
                                    'lan': {'read': 1, 'write': 1, 'delete': 1},
                                    'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    add_protocol(self.rse1_id, {'scheme': 'davs',
                                'hostname': 'davs.aperture.com',
                                'port': 443,
                                'prefix': '/test/chamber/',
                                'impl': 'rucio.rse.protocols.gfal.Default',
                                'domains': {
                                    'lan': {'read': 2, 'write': 2, 'delete': 2},
                                    'wan': {'read': 2, 'write': 2, 'delete': 2}}})
    add_protocol(self.rse1_id, {'scheme': 'gsiftp',
                                'hostname': 'gsiftp.aperture.com',
                                'port': 8446,
                                'prefix': '/test/chamber/',
                                'impl': 'rucio.rse.protocols.gfal.Default',
                                'domains': {
                                    'lan': {'read': 0, 'write': 0, 'delete': 0},
                                    'wan': {'read': 3, 'write': 3, 'delete': 3}}})
    # RSE2 protocols: gsiftp (lan 2 / wan 1), davs (lan disabled / wan 2),
    # root (lan 1 / wan 3).
    add_protocol(self.rse2_id, {'scheme': 'gsiftp',
                                'hostname': 'gsiftp.blackmesa.com',
                                'port': 8446,
                                'prefix': '/lambda/complex/',
                                'impl': 'rucio.rse.protocols.gfal.Default',
                                'domains': {
                                    'lan': {'read': 2, 'write': 2, 'delete': 2},
                                    'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    add_protocol(self.rse2_id, {'scheme': 'davs',
                                'hostname': 'davs.blackmesa.com',
                                'port': 443,
                                'prefix': '/lambda/complex/',
                                'impl': 'rucio.rse.protocols.gfal.Default',
                                'domains': {
                                    'lan': {'read': 0, 'write': 0, 'delete': 0},
                                    'wan': {'read': 2, 'write': 2, 'delete': 2}}})
    add_protocol(self.rse2_id, {'scheme': 'root',
                                'hostname': 'root.blackmesa.com',
                                'port': 1409,
                                'prefix': '//lambda/complex/',
                                'impl': 'rucio.rse.protocols.xrootd.Default',
                                'domains': {
                                    'lan': {'read': 1, 'write': 1, 'delete': 1},
                                    'wan': {'read': 3, 'write': 3, 'delete': 3}}})

    # Client on the APERTURE site: RSE1 endpoints come first as LAN, RSE2 as
    # WAN; the LAN-disabled gsiftp.aperture endpoint is not offered (5 PFNs).
    replicas = [r for r in self.rc.list_replicas(
        dids=[{'scope': 'mock', 'name': f['name'], 'type': 'FILE'} for f in self.files],
        schemes=['root', 'gsiftp', 'davs'],
        client_location={'site': 'APERTURE'})]
    pfns = [r['pfns'] for r in replicas][0]
    assert_equal(len(pfns.keys()), 5)
    assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['domain'], 'lan')
    assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['priority'], 1)
    assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['domain'], 'lan')
    assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['priority'], 2)
    assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
    assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['priority'], 3)
    assert_equal(pfns['davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
    assert_equal(pfns['davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0']['priority'], 4)
    assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
    assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['priority'], 5)

    # Client on the BLACKMESA site: mirror image; the LAN-disabled
    # davs.blackmesa endpoint is not offered (5 PFNs).
    replicas = [r for r in self.rc.list_replicas(
        dids=[{'scope': 'mock', 'name': f['name'], 'type': 'FILE'} for f in self.files],
        schemes=['root', 'gsiftp', 'davs'],
        client_location={'site': 'BLACKMESA'})]
    pfns = [r['pfns'] for r in replicas][0]
    assert_equal(len(pfns.keys()), 5)
    assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['domain'], 'lan')
    assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['priority'], 1)
    assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['domain'], 'lan')
    assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['priority'], 2)
    assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['domain'], 'wan')
    assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['priority'], 3)
    assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['domain'], 'wan')
    assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['priority'], 4)
    assert_equal(pfns['gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0']['domain'], 'wan')
    assert_equal(pfns['gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0']['priority'], 5)

    # Client on neither site: all six endpoints are WAN; the exact order within
    # a WAN-priority tie is not fixed, hence assert_in over priority pairs.
    replicas = [r for r in self.rc.list_replicas(
        dids=[{'scope': 'mock', 'name': f['name'], 'type': 'FILE'} for f in self.files],
        schemes=['root', 'gsiftp', 'davs'],
        client_location={'site': 'XEN'})]
    pfns = [r['pfns'] for r in replicas][0]
    assert_equal(len(pfns.keys()), 6)
    # TODO: intractable until RSE sorting is enabled
    assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
    assert_in(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['priority'], [1, 2])
    assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['domain'], 'wan')
    assert_in(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['priority'], [1, 2])
    assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['domain'], 'wan')
    assert_in(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['priority'], [3, 4])
    assert_equal(pfns['davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
    assert_in(pfns['davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0']['priority'], [3, 4])
    assert_equal(pfns['gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0']['domain'], 'wan')
    assert_in(pfns['gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0']['priority'], [5, 6])
    assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
    assert_in(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['priority'], [5, 6])

    # Same ordering checks through the metalink rendering, APERTURE client.
    ml = self.rc.list_replicas(dids=[{'scope': 'mock', 'name': f['name'], 'type': 'FILE'} for f in self.files],
                               schemes=['root', 'gsiftp', 'davs'],
                               metalink=True,
                               client_location={'site': 'APERTURE'})
    assert_in('domain="lan" priority="1" client_extract="false">root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0', ml)
    assert_in('domain="lan" priority="2" client_extract="false">davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="3" client_extract="false">gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="4" client_extract="false">davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="5" client_extract="false">root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0', ml)
    assert_not_in('priority="6"', ml)

    # Metalink rendering, BLACKMESA client.
    ml = self.rc.list_replicas(dids=[{'scope': 'mock', 'name': f['name'], 'type': 'FILE'} for f in self.files],
                               schemes=['root', 'gsiftp', 'davs'],
                               metalink=True,
                               client_location={'site': 'BLACKMESA'})
    assert_in('domain="lan" priority="1" client_extract="false">root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0', ml)
    assert_in('domain="lan" priority="2" client_extract="false">gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="3" client_extract="false">root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="4" client_extract="false">davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="5" client_extract="false">gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0', ml)
    assert_not_in('priority="6"', ml)

    # TODO: intractable until RSE sorting is enabled
    # ml = self.rc.list_replicas(dids=[{'scope': 'mock',
    #                                   'name': f['name'],
    #                                   'type': 'FILE'} for f in self.files],
    #                            schemes=['root', 'gsiftp', 'davs'],
    #                            metalink=True,
    #                            client_location={'site': 'XEN'})
    # assert_in('domain="wan" priority="1">root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0', ml)
    # assert_in('domain="wan" priority="2">gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0', ml)
    # assert_in('domain="wan" priority="3">davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0', ml)
    # assert_in('domain="wan" priority="4">davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0', ml)
    # assert_in('domain="wan" priority="5">gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0', ml)
    # assert_in('domain="wan" priority="6">root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0', ml)
    # assert_not_in('priority="7"', ml)

    # ensure correct handling of disabled protocols
    add_protocol(self.rse1_id, {'scheme': 'root',
                                'hostname': 'root2.aperture.com',
                                'port': 1409,
                                'prefix': '//test/chamber/',
                                'impl': 'rucio.rse.protocols.xrootd.Default',
                                'domains': {
                                    'lan': {'read': 1, 'write': 1, 'delete': 1},
                                    'wan': {'read': 0, 'write': 0, 'delete': 0}}})
    # The WAN-disabled root2 endpoint must not change the BLACKMESA ordering.
    ml = self.rc.list_replicas(dids=[{'scope': 'mock', 'name': f['name'], 'type': 'FILE'} for f in self.files],
                               schemes=['root', 'gsiftp', 'davs'],
                               metalink=True,
                               client_location={'site': 'BLACKMESA'})
    assert_in('domain="lan" priority="1" client_extract="false">root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0', ml)
    assert_in('domain="lan" priority="2" client_extract="false">gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="3" client_extract="false">root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="4" client_extract="false">davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0', ml)
    assert_in('domain="wan" priority="5" client_extract="false">gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0', ml)
    assert_not_in('priority="6"', ml)

    # Cleanup.
    delete_replicas(rse_id=self.rse1_id, files=self.files)
    delete_replicas(rse_id=self.rse2_id, files=self.files)
    del_rse(self.rse1_id)
    del_rse(self.rse2_id)
def test_list_dataset_replicas_archive(self):
    """ REPLICA (CLIENT): List dataset replicas with archives. """
    replica_client = ReplicaClient()
    did_client = DIDClient()
    rule_client = RuleClient()

    scope = 'mock'

    # Two RSEs, each exposing a single xrootd protocol.
    rse = 'APERTURE_%s' % rse_name_generator()
    rse_id = add_rse(rse, **self.vo)
    rse2 = 'BLACKMESA_%s' % rse_name_generator()
    rse2_id = add_rse(rse2, **self.vo)
    for target_id, host, prefix in ((rse_id, 'root.aperture.com', '//test/chamber/'),
                                    (rse2_id, 'root.blackmesa.com', '//underground/facility')):
        add_protocol(rse_id=target_id,
                     parameter={'scheme': 'root',
                                'hostname': host,
                                'port': 1409,
                                'prefix': prefix,
                                'impl': 'rucio.rse.protocols.xrootd.Default',
                                'domains': {'lan': {'read': 1, 'write': 1, 'delete': 1},
                                            'wan': {'read': 1, 'write': 1, 'delete': 1}}})

    # register archive
    archive = {'scope': scope,
               'name': 'another.%s.zip' % generate_uuid(),
               'type': 'FILE',
               'bytes': 2596,
               'adler32': 'deedbeaf'}
    replica_client.add_replicas(rse=rse, files=[archive])
    replica_client.add_replicas(rse=rse2, files=[archive])

    # The archive constituents are registered on the second RSE only.
    archived_files = [{'scope': scope,
                       'name': 'zippedfile-%i-%s' % (i, str(generate_uuid())),
                       'type': 'FILE',
                       'bytes': 4322,
                       'adler32': 'deaddead'} for i in range(2)]
    replica_client.add_replicas(rse=rse2, files=archived_files)
    did_client.add_files_to_archive(scope=scope, name=archive['name'], files=archived_files)

    # Dataset containing the archived files, with a rule pinning it on rse.
    dataset_name = 'find_me.' + str(generate_uuid())
    did_client.add_dataset(scope=scope, name=dataset_name)
    did_client.attach_dids(scope=scope, name=dataset_name, dids=archived_files)
    rule_client.add_replication_rule(dids=[{'scope': scope, 'name': dataset_name}],
                                     account='root',
                                     copies=1,
                                     rse_expression=rse,
                                     grouping='DATASET')

    # Shallow listing sees only the (still incomplete) dataset replica.
    shallow = list(replica_client.list_dataset_replicas(scope=scope, name=dataset_name))
    assert len(shallow) == 1
    assert shallow[0]['state'] == 'UNAVAILABLE'

    # Deep listing resolves the archive contents as well.
    deep = list(replica_client.list_dataset_replicas(scope=scope,
                                                     name=dataset_name,
                                                     deep=True))
    assert len(deep) == 3
    assert deep[0]['state'] == 'AVAILABLE'
    assert deep[1]['state'] == 'AVAILABLE'
    assert deep[2]['state'] == 'AVAILABLE'

    del_rse(rse_id)
def setUpClass(self):
    """Class-level fixtures: two RSEs on different sites -- APERTURE1 (which
    gets an internal root proxy configured) and BLACKMESA1 (without one) --
    plus matching client locations, shared replicas, and one xrootd protocol
    per RSE."""
    # Multi-VO installations need an explicit VO argument and header.
    if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
        self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
        self.vo_header = {'X-Rucio-VO': self.vo['vo']}
    else:
        self.vo = {}
        self.vo_header = {}

    self.rc = ReplicaClient()

    # Client located on the site without a proxy.
    self.client_location_without_proxy = {'ip': '192.168.0.1',
                                          'fqdn': 'anomalous-materials.blackmesa.com',
                                          'site': 'BLACKMESA1'}
    self.rse_without_proxy = rse_name_generator()
    self.rse_without_proxy_id = add_rse(self.rse_without_proxy, **self.vo)
    add_rse_attribute(rse_id=self.rse_without_proxy_id,
                      key='site',
                      value='BLACKMESA1')

    # Client located on the proxied site.
    self.client_location_with_proxy = {'ip': '10.0.1.1',
                                       'fqdn': 'test-chamber.aperture.com',
                                       'site': 'APERTURE1'}
    self.rse_with_proxy = rse_name_generator()
    self.rse_with_proxy_id = add_rse(self.rse_with_proxy, **self.vo)
    add_rse_attribute(rse_id=self.rse_with_proxy_id, key='site', value='APERTURE1')

    # APERTURE1 site has an internal proxy
    config_set('root-proxy-internal', 'APERTURE1', 'proxy.aperture.com:1094')

    # Identical replicas registered on both RSEs.
    self.files = [{'scope': InternalScope('mock', **self.vo),
                   'name': 'half-life_%s' % i,
                   'bytes': 1234,
                   'adler32': 'deadbeef',
                   'meta': {'events': 666}} for i in range(1, 4)]
    for rse_id in [self.rse_with_proxy_id, self.rse_without_proxy_id]:
        add_replicas(rse_id=rse_id,
                     files=self.files,
                     account=InternalAccount('root', **self.vo),
                     ignore_availability=True)

    add_protocol(self.rse_without_proxy_id, {'scheme': 'root',
                                             'hostname': 'root.blackmesa.com',
                                             'port': 1409,
                                             'prefix': '//training/facility/',
                                             'impl': 'rucio.rse.protocols.xrootd.Default',
                                             'domains': {
                                                 'lan': {'read': 1, 'write': 1, 'delete': 1},
                                                 'wan': {'read': 1, 'write': 1, 'delete': 1}}})
    add_protocol(self.rse_with_proxy_id, {'scheme': 'root',
                                          'hostname': 'root.aperture.com',
                                          'port': 1409,
                                          'prefix': '//test/chamber/',
                                          'impl': 'rucio.rse.protocols.xrootd.Default',
                                          'domains': {
                                              'lan': {'read': 1, 'write': 1, 'delete': 1},
                                              'wan': {'read': 1, 'write': 1, 'delete': 1}}})