Code Example #1
    def test_list_rses_based_on_availability(self):
        """ RSE_EXPRESSION_PARSER (CORE) List rses based on availability filter"""
        rseWRITE_name = rse_name_generator()
        rseNOWRITE_name = rse_name_generator()

        rseWRITE_id = rse.add_rse(rseWRITE_name)
        rseNOWRITE_id = rse.add_rse(rseNOWRITE_name)

        attribute = attribute_name_generator()

        rse.add_rse_attribute(rseWRITE_name, attribute, "de")
        rse.add_rse_attribute(rseNOWRITE_name, attribute, "de")

        rse.update_rse(rseWRITE_name, {'availability_write': True})
        rse.update_rse(rseNOWRITE_name, {'availability_write': False})

        assert_equal(sorted([item['id'] for item in rse_expression_parser.parse_expression("%s=de" % attribute)]),
                     sorted([rseWRITE_id, rseNOWRITE_id]))

        assert_equal(sorted([item['id'] for item in rse_expression_parser.parse_expression("%s=de" % attribute, {'availability_write': True})]),
                     sorted([rseWRITE_id]))

        assert_raises(InvalidRSEExpression, rse_expression_parser.parse_expression, "%s=de" % attribute, {'availability_write': False})
Code Example #2
File: rse.py  Project: pombredanne/rucio
def add_rse_attribute(rse, key, value, issuer):
    """ Adds a RSE attribute.

    :param rse: the rse name.
    :param key: the key name.
    :param value: the value name.
    :param issuer: The issuer account.

    :returns: True if successful, False otherwise.
    """

    kwargs = {'rse': rse, 'key': key, 'value': value}
    if not permission.has_permission(issuer=issuer, action='add_rse_attribute', kwargs=kwargs):
        raise exception.AccessDenied('Account %s can not add RSE attributes' % (issuer))

    return rse_module.add_rse_attribute(rse=rse, key=key, value=value)
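
A minimal usage sketch for the gateway function above, under assumptions not taken from these examples: that it is importable as rucio.api.rse.add_rse_attribute and that an RSE named 'MOCK' already exists. It only illustrates the permission check described in the docstring.

# Hedged sketch; the module path, RSE name, and issuer are assumptions.
from rucio.api.rse import add_rse_attribute
from rucio.common.exception import AccessDenied

try:
    # 'root' is normally privileged; an unprivileged issuer would be rejected.
    added = add_rse_attribute(rse='MOCK', key='istape', value='False', issuer='root')
    print(added)  # True if the attribute was stored
except AccessDenied as error:
    # raised by the wrapper when has_permission() returns False
    print('issuer may not add RSE attributes: %s' % error)
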
Code Example #3
File: test_replica_sorting.py  Project: stfc/rucio
def protocols_setup(vo):
    rse_info = copy.deepcopy(base_rse_info)

    files = [{
        'scope': InternalScope('mock', vo=vo),
        'name': 'element_0',
        'bytes': 1234,
        'adler32': 'deadbeef'
    }]
    root = InternalAccount('root', vo=vo)

    for idx in range(len(rse_info)):
        rse_info[idx]['name'] = '%s_%s' % (rse_info[idx]['site'],
                                           rse_name_generator())
        rse_info[idx]['id'] = add_rse(rse_info[idx]['name'], vo=vo)
        add_rse_attribute(rse_id=rse_info[idx]['id'],
                          key='site',
                          value=base_rse_info[idx]['site'])
        add_replicas(rse_id=rse_info[idx]['id'], files=files, account=root)

    # invalidate cache for parse_expression('site=…')
    rse_expression_parser.REGION.invalidate()

    # check sites
    for idx in range(len(rse_info)):
        site_rses = rse_expression_parser.parse_expression(
            'site=' + base_rse_info[idx]['site'])
        assert len(site_rses) > 0
        assert rse_info[idx]['id'] in [rse['id'] for rse in site_rses]

    add_protocol(
        rse_info[0]['id'], {
            'scheme': schemes[0],
            'hostname': ('root.%s' % base_rse_info[0]['address']),
            'port': 1409,
            'prefix': '//test/chamber/',
            'impl': 'rucio.rse.protocols.xrootd.Default',
            'domains': {
                'lan': {
                    'read': 1,
                    'write': 1,
                    'delete': 1
                },
                'wan': {
                    'read': 1,
                    'write': 1,
                    'delete': 1
                }
            }
        })
    add_protocol(
        rse_info[0]['id'], {
            'scheme': schemes[2],
            'hostname': ('davs.%s' % base_rse_info[0]['address']),
            'port': 443,
            'prefix': '/test/chamber/',
            'impl': 'rucio.rse.protocols.gfal.Default',
            'domains': {
                'lan': {
                    'read': 2,
                    'write': 2,
                    'delete': 2
                },
                'wan': {
                    'read': 2,
                    'write': 2,
                    'delete': 2
                }
            }
        })
    add_protocol(
        rse_info[0]['id'], {
            'scheme': schemes[1],
            'hostname': ('gsiftp.%s' % base_rse_info[0]['address']),
            'port': 8446,
            'prefix': '/test/chamber/',
            'impl': 'rucio.rse.protocols.gfal.Default',
            'domains': {
                'lan': {
                    'read': 0,
                    'write': 0,
                    'delete': 0
                },
                'wan': {
                    'read': 3,
                    'write': 3,
                    'delete': 3
                }
            }
        })

    add_protocol(
        rse_info[1]['id'], {
            'scheme': schemes[1],
            'hostname': ('gsiftp.%s' % base_rse_info[1]['address']),
            'port': 8446,
            'prefix': '/lambda/complex/',
            'impl': 'rucio.rse.protocols.gfal.Default',
            'domains': {
                'lan': {
                    'read': 2,
                    'write': 2,
                    'delete': 2
                },
                'wan': {
                    'read': 1,
                    'write': 1,
                    'delete': 1
                }
            }
        })
    add_protocol(
        rse_info[1]['id'], {
            'scheme': schemes[2],
            'hostname': ('davs.%s' % base_rse_info[1]['address']),
            'port': 443,
            'prefix': '/lambda/complex/',
            'impl': 'rucio.rse.protocols.gfal.Default',
            'domains': {
                'lan': {
                    'read': 0,
                    'write': 0,
                    'delete': 0
                },
                'wan': {
                    'read': 2,
                    'write': 2,
                    'delete': 2
                }
            }
        })
    add_protocol(
        rse_info[1]['id'], {
            'scheme': schemes[0],
            'hostname': ('root.%s' % base_rse_info[1]['address']),
            'port': 1409,
            'prefix': '//lambda/complex/',
            'impl': 'rucio.rse.protocols.xrootd.Default',
            'domains': {
                'lan': {
                    'read': 1,
                    'write': 1,
                    'delete': 1
                },
                'wan': {
                    'read': 3,
                    'write': 3,
                    'delete': 3
                }
            }
        })

    yield {'files': files, 'rse_info': rse_info}

    for info in rse_info:
        delete_replicas(rse_id=info['id'], files=files)
        del_rse_attribute(rse_id=info['id'], key='site')
        del_rse(info['id'])
Code Example #4
File: test_root_proxy.py  Project: pradeepjasal/rucio
def root_proxy_example_data(vo):
    rse_without_proxy = rse_name_generator()
    rse_without_proxy_id = add_rse(rse_without_proxy, vo=vo)
    add_rse_attribute(rse_id=rse_without_proxy_id,
                      key='site',
                      value='BLACKMESA1')

    rse_with_proxy = rse_name_generator()
    rse_with_proxy_id = add_rse(rse_with_proxy, vo=vo)
    add_rse_attribute(rse_id=rse_with_proxy_id, key='site', value='APERTURE1')

    # APERTURE1 site has an internal proxy
    config_set('root-proxy-internal', 'APERTURE1', 'proxy.aperture.com:1094')

    files = [{
        'scope': InternalScope('mock', vo=vo),
        'name': 'half-life_%s' % i,
        'bytes': 1234,
        'adler32': 'deadbeef',
        'meta': {
            'events': 666
        }
    } for i in range(1, 4)]
    for rse_id in [rse_with_proxy_id, rse_without_proxy_id]:
        add_replicas(rse_id=rse_id,
                     files=files,
                     account=InternalAccount('root', vo=vo),
                     ignore_availability=True)

    add_protocol(
        rse_without_proxy_id, {
            'scheme': 'root',
            'hostname': 'root.blackmesa.com',
            'port': 1409,
            'prefix': '//training/facility/',
            'impl': 'rucio.rse.protocols.xrootd.Default',
            'domains': {
                'lan': {
                    'read': 1,
                    'write': 1,
                    'delete': 1
                },
                'wan': {
                    'read': 1,
                    'write': 1,
                    'delete': 1
                }
            }
        })

    add_protocol(
        rse_with_proxy_id, {
            'scheme': 'root',
            'hostname': 'root.aperture.com',
            'port': 1409,
            'prefix': '//test/chamber/',
            'impl': 'rucio.rse.protocols.xrootd.Default',
            'domains': {
                'lan': {
                    'read': 1,
                    'write': 1,
                    'delete': 1
                },
                'wan': {
                    'read': 1,
                    'write': 1,
                    'delete': 1
                }
            }
        })

    yield {
        'files': files,
        'rse_without_proxy': rse_without_proxy,
        'rse_with_proxy': rse_with_proxy
    }

    for rse_id in [rse_with_proxy_id, rse_without_proxy_id]:
        delete_replicas(rse_id=rse_id, files=files)
    del_rse(rse_with_proxy_id)
    del_rse(rse_without_proxy_id)
Code Example #5
    def setup(self):
        self.rse1 = rse_name_generator()
        self.rse2 = rse_name_generator()
        self.rse3 = rse_name_generator()
        self.rse4 = rse_name_generator()
        self.rse5 = rse_name_generator()

        self.rse1_id = rse.add_rse(self.rse1)
        self.rse2_id = rse.add_rse(self.rse2)
        self.rse3_id = rse.add_rse(self.rse3)
        self.rse4_id = rse.add_rse(self.rse4)
        self.rse5_id = rse.add_rse(self.rse5)

        # Add Attributes
        self.attribute = attribute_name_generator()

        rse.add_rse_attribute(self.rse1, self.attribute, "at")
        rse.add_rse_attribute(self.rse2, self.attribute, "de")
        rse.add_rse_attribute(self.rse3, self.attribute, "fr")
        rse.add_rse_attribute(self.rse4, self.attribute, "uk")
        rse.add_rse_attribute(self.rse5, self.attribute, "us")

        # Add Tags
        self.tag1 = tag_generator()
        self.tag2 = tag_generator()
        rse.add_rse_attribute(self.rse1, self.tag1, True)
        rse.add_rse_attribute(self.rse2, self.tag1, True)
        rse.add_rse_attribute(self.rse3, self.tag1, True)
        rse.add_rse_attribute(self.rse4, self.tag2, True)
        rse.add_rse_attribute(self.rse5, self.tag2, True)
Code Example #6
File: test_judge_repairer.py  Project: zlion/rucio
    def setUpClass(cls):
        # Add test RSE
        cls.rse1 = 'MOCK'
        cls.rse3 = 'MOCK3'
        cls.rse4 = 'MOCK4'
        cls.rse5 = 'MOCK5'

        cls.rse1_id = get_rse_id(rse=cls.rse1)
        cls.rse3_id = get_rse_id(rse=cls.rse3)
        cls.rse4_id = get_rse_id(rse=cls.rse4)
        cls.rse5_id = get_rse_id(rse=cls.rse5)

        # Add Tags
        cls.T1 = tag_generator()
        cls.T2 = tag_generator()
        add_rse_attribute(cls.rse1_id, cls.T1, True)
        add_rse_attribute(cls.rse3_id, cls.T1, True)
        add_rse_attribute(cls.rse4_id, cls.T2, True)
        add_rse_attribute(cls.rse5_id, cls.T1, True)

        # Add fake weights
        add_rse_attribute(cls.rse1_id, "fakeweight", 10)
        add_rse_attribute(cls.rse3_id, "fakeweight", 0)
        add_rse_attribute(cls.rse4_id, "fakeweight", 0)
        add_rse_attribute(cls.rse5_id, "fakeweight", 0)

        # Add quota
        cls.jdoe = InternalAccount('jdoe')
        cls.root = InternalAccount('root')
        set_local_account_limit(cls.jdoe, cls.rse1_id, -1)
        set_local_account_limit(cls.jdoe, cls.rse3_id, -1)
        set_local_account_limit(cls.jdoe, cls.rse4_id, -1)
        set_local_account_limit(cls.jdoe, cls.rse5_id, -1)

        set_local_account_limit(cls.root, cls.rse1_id, -1)
        set_local_account_limit(cls.root, cls.rse3_id, -1)
        set_local_account_limit(cls.root, cls.rse4_id, -1)
        set_local_account_limit(cls.root, cls.rse5_id, -1)
Code Example #7
def test_list_requests(vo, rest_client, auth_token):
    """ REQUEST (REST): list requests """
    source_rse = 'MOCK'
    source_rse_id = get_rse_id(source_rse, vo=vo)
    source_rse2 = 'MOCK2'
    source_rse_id2 = get_rse_id(source_rse2, vo=vo)
    source_rse3 = 'MOCK5'
    source_rse_id3 = get_rse_id(source_rse3, vo=vo)
    dest_rse = 'MOCK3'
    dest_rse_id = get_rse_id(dest_rse, vo=vo)
    dest_rse2 = 'MOCK4'
    dest_rse_id2 = get_rse_id(dest_rse2, vo=vo)
    db_session = session.get_session()
    source_site = 'SITE1'
    source_site2 = 'SITE2'
    dst_site = 'SITE3'
    dst_site2 = 'SITE4'
    add_rse_attribute(source_rse_id, 'site', source_site)
    add_rse_attribute(source_rse_id2, 'site', source_site2)
    add_rse_attribute(source_rse_id3, 'site', source_site)
    add_rse_attribute(dest_rse_id, 'site', dst_site)
    add_rse_attribute(dest_rse_id2, 'site', dst_site2)

    db_session.query(models.Source).delete()
    db_session.query(models.Request).delete()
    db_session.commit()

    name1 = generate_uuid()
    name2 = generate_uuid()
    name3 = generate_uuid()
    models.Request(state=constants.RequestState.WAITING, source_rse_id=source_rse_id, dest_rse_id=dest_rse_id, name=name3).save(session=db_session)
    models.Request(state=constants.RequestState.SUBMITTED, source_rse_id=source_rse_id2, dest_rse_id=dest_rse_id, name=name1).save(session=db_session)
    models.Request(state=constants.RequestState.SUBMITTED, source_rse_id=source_rse_id, dest_rse_id=dest_rse_id2, name=name1).save(session=db_session)
    models.Request(state=constants.RequestState.SUBMITTED, source_rse_id=source_rse_id, dest_rse_id=dest_rse_id, name=name1).save(session=db_session)
    models.Request(state=constants.RequestState.SUBMITTED, source_rse_id=source_rse_id, dest_rse_id=dest_rse_id, name=name2).save(session=db_session)
    models.Request(state=constants.RequestState.SUBMITTED, source_rse_id=source_rse_id3, dest_rse_id=dest_rse_id, name=name2).save(session=db_session)
    db_session.commit()

    def check_correct_api(params, expected_requests):
        headers_dict = {'X-Rucio-Type': 'user', 'X-Rucio-Account': 'root'}
        response = rest_client.get('/requests/list', query_string=params, headers=headers(auth(auth_token), vohdr(vo), hdrdict(headers_dict)))
        assert response.status_code == 200
        requests = set()
        for request in response.get_data(as_text=True).split('\n')[:-1]:
            request = parse_response(request)
            requests.add((request['state'], request['source_rse_id'], request['dest_rse_id'], request['name']))
        assert requests == expected_requests

    def check_error_api(params, exception_class, exception_message, code):
        headers_dict = {'X-Rucio-Type': 'user', 'X-Rucio-Account': 'root'}
        response = rest_client.get('/requests/list', query_string=params, headers=headers(auth(auth_token), vohdr(vo), hdrdict(headers_dict)))
        assert response.status_code == code
        body = parse_response(response.get_data(as_text=True))
        assert body['ExceptionClass'] == exception_class
        assert body['ExceptionMessage'] == exception_message

    params = {'src_rse': source_rse, 'dst_rse': dest_rse, 'request_states': 'S'}
    expected_requests = set()
    expected_requests.add(('SUBMITTED', source_rse_id, dest_rse_id, name1))
    expected_requests.add(('SUBMITTED', source_rse_id, dest_rse_id, name2))
    check_correct_api(params, expected_requests)

    params = {'src_rse': source_rse, 'dst_rse': dest_rse, 'request_states': 'Q'}
    expected_requests = set([])
    check_correct_api(params, expected_requests)

    params = {'src_rse': source_rse2, 'dst_rse': dest_rse, 'request_states': 'S'}
    expected_requests = set()
    expected_requests.add(('SUBMITTED', source_rse_id2, dest_rse_id, name1))
    check_correct_api(params, expected_requests)

    params = {'src_rse': source_rse, 'dst_rse': dest_rse2, 'request_states': 'S'}
    expected_requests = set()
    expected_requests.add(('SUBMITTED', source_rse_id, dest_rse_id2, name1))
    check_correct_api(params, expected_requests)

    params = {'src_site': source_site, 'dst_site': dst_site, 'request_states': 'S'}
    expected_requests = set()
    expected_requests.add(('SUBMITTED', source_rse_id, dest_rse_id, name1))
    expected_requests.add(('SUBMITTED', source_rse_id, dest_rse_id, name2))
    # check correct resolution of the site attribute to multiple RSEs
    expected_requests.add(('SUBMITTED', source_rse_id3, dest_rse_id, name2))
    check_correct_api(params, expected_requests)

    params = {'src_site': source_site, 'dst_site': dst_site, 'request_states': 'S,W,Q'}
    expected_requests = set()
    expected_requests.add(('SUBMITTED', source_rse_id, dest_rse_id, name1))
    expected_requests.add(('SUBMITTED', source_rse_id, dest_rse_id, name2))
    expected_requests.add(('WAITING', source_rse_id, dest_rse_id, name3))
    expected_requests.add(('SUBMITTED', source_rse_id3, dest_rse_id, name2))
    check_correct_api(params, expected_requests)

    params = {'src_site': source_site2, 'dst_site': dst_site, 'request_states': 'S'}
    expected_requests = set()
    expected_requests.add(('SUBMITTED', source_rse_id2, dest_rse_id, name1))
    check_correct_api(params, expected_requests)

    params = {'src_site': source_site, 'dst_site': dst_site2, 'request_states': 'S'}
    expected_requests = set()
    expected_requests.add(('SUBMITTED', source_rse_id, dest_rse_id2, name1))
    check_correct_api(params, expected_requests)

    params = {'src_site': source_site, 'dst_site': dst_site2, 'request_states': 'S,W,Q'}
    expected_requests = set()
    expected_requests.add(('SUBMITTED', source_rse_id, dest_rse_id2, name1))
    check_correct_api(params, expected_requests)

    params = {}
    check_error_api(params, 'MissingParameter', 'Request state is missing', 400)

    params = {'request_states': 'unkown', 'dst_rse': dest_rse, 'src_rse': source_rse}
    check_error_api(params, 'Invalid', 'Request state value is invalid', 400)

    params = {'request_states': 'S', 'src_rse': source_rse}
    check_error_api(params, 'MissingParameter', 'Destination RSE is missing', 400)

    params = {'request_states': 'S', 'dst_rse': source_rse}
    check_error_api(params, 'MissingParameter', 'Source RSE is missing', 400)

    params = {'request_states': 'S', 'src_rse': source_rse, 'dst_site': 'SITE'}
    check_error_api(params, 'MissingParameter', 'Destination RSE is missing', 400)

    params = {'request_states': 'S', 'src_site': source_site}
    check_error_api(params, 'MissingParameter', 'Destination site is missing', 400)

    params = {'request_states': 'S', 'dst_site': dst_site}
    check_error_api(params, 'MissingParameter', 'Source site is missing', 400)

    params = {'request_states': 'S', 'src_site': source_site, 'dst_site': 'unknown'}
    check_error_api(params, 'NotFound', 'Could not resolve site name unknown to RSE', 404)
Code Example #8
    def __init__(self):
        self.rse1 = rse_name_generator()
        self.rse2 = rse_name_generator()
        self.rse3 = rse_name_generator()
        self.rse4 = rse_name_generator()
        self.rse5 = rse_name_generator()

        self.rse1_id = rse.add_rse(self.rse1)
        self.rse2_id = rse.add_rse(self.rse2)
        self.rse3_id = rse.add_rse(self.rse3)
        self.rse4_id = rse.add_rse(self.rse4)
        self.rse5_id = rse.add_rse(self.rse5)

        # Add Attributes
        self.attribute = attribute_name_generator()

        rse.add_rse_attribute(self.rse1, self.attribute, "at")
        rse.add_rse_attribute(self.rse2, self.attribute, "de")
        rse.add_rse_attribute(self.rse3, self.attribute, "fr")
        rse.add_rse_attribute(self.rse4, self.attribute, "uk")
        rse.add_rse_attribute(self.rse5, self.attribute, "us")

        # Add Tags
        self.tag1 = tag_generator()
        self.tag2 = tag_generator()
        rse.add_rse_attribute(self.rse1, self.tag1, True)
        rse.add_rse_attribute(self.rse2, self.tag1, True)
        rse.add_rse_attribute(self.rse3, self.tag1, True)
        rse.add_rse_attribute(self.rse4, self.tag2, True)
        rse.add_rse_attribute(self.rse5, self.tag2, True)

        self.rse_client = RSEClient()
Code Example #9
    def setUp(self):
        if config_get_bool('common',
                           'multi_vo',
                           raise_exception=False,
                           default=False):
            self.vo = {'vo': get_vo()}
        else:
            self.vo = {}

        self.replica_client = ReplicaClient()
        assert hasattr(self, "rse_factory")
        rse_factory = self.rse_factory

        # Using two test RSEs
        self.rse4suspicious, self.rse4suspicious_id = rse_factory.make_posix_rse(
            deterministic=True, **self.vo)
        self.rse4recovery, self.rse4recovery_id = rse_factory.make_posix_rse(
            deterministic=True, **self.vo)
        self.scope = 'mock'
        self.internal_scope = InternalScope(self.scope, **self.vo)

        # For testing, we create 5 files and upload them with Rucio to the two test RSEs.
        self.tmp_file1 = file_generator()
        self.tmp_file2 = file_generator()
        self.tmp_file3 = file_generator()
        self.tmp_file4 = file_generator()
        self.tmp_file5 = file_generator()

        self.listdids = [{
            'scope': self.internal_scope,
            'name': path.basename(f),
            'type': DIDType.FILE
        } for f in [
            self.tmp_file1, self.tmp_file2, self.tmp_file3, self.tmp_file4,
            self.tmp_file5
        ]]

        for rse in [self.rse4suspicious, self.rse4recovery]:
            cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4} {5} {6}'.format(
                rse, self.scope, self.tmp_file1, self.tmp_file2,
                self.tmp_file3, self.tmp_file4, self.tmp_file5)
            exitcode, out, err = execute(cmd)

            print(exitcode, out, err)
            # checking if Rucio upload went OK
            assert exitcode == 0

        # Set fictional datatypes
        set_metadata(self.internal_scope, path.basename(self.tmp_file4),
                     'datatype', 'testtypedeclarebad')
        set_metadata(self.internal_scope, path.basename(self.tmp_file5),
                     'datatype', 'testtypenopolicy')

        # Allow for the RSEs to be affected by the suspicious file recovery daemon
        add_rse_attribute(self.rse4suspicious_id,
                          "enable_suspicious_file_recovery", True)
        add_rse_attribute(self.rse4recovery_id,
                          "enable_suspicious_file_recovery", True)

        # removing physical files from /tmp location - keeping only their DB info
        remove(self.tmp_file1)
        remove(self.tmp_file2)
        remove(self.tmp_file3)
        remove(self.tmp_file4)
        remove(self.tmp_file5)

        # Gather replica info
        replicalist = list_replicas(dids=self.listdids)

        # Changing the replica statuses as follows:
        # ----------------------------------------------------------------------------------------------------------------------------------
        # Name         State(s) declared on rse4recovery       State(s) declared on rse4suspicious        Metadata "datatype"
        # ----------------------------------------------------------------------------------------------------------------------------------
        # tmp_file1    available                                suspicious (available)
        # tmp_file2    available                                suspicious + bad (unavailable)
        # tmp_file3    unavailable                              suspicious (available)                      RAW
        # tmp_file4    unavailable                              suspicious (available)                      testtypedeclarebad
        # tmp_file5    unavailable                              suspicious (available)                      testtypenopolicy
        # ----------------------------------------------------------------------------------------------------------------------------------

        for replica in replicalist:
            suspicious_pfns = replica['rses'][self.rse4suspicious_id]
            for i in range(3):
                print("Declaring suspicious file replica: " +
                      suspicious_pfns[0])
                # The reason must contain the word "checksum", so that the replica can be declared bad.
                self.replica_client.declare_suspicious_file_replicas([
                    suspicious_pfns[0],
                ], 'checksum')
                sleep(1)
            if replica['name'] == path.basename(self.tmp_file2):
                print("Declaring bad file replica: " + suspicious_pfns[0])
                self.replica_client.declare_bad_file_replicas([
                    suspicious_pfns[0],
                ], 'checksum')
            if replica['name'] == path.basename(self.tmp_file3):
                print("Updating replica state as unavailable: " +
                      replica['rses'][self.rse4recovery_id][0])
                update_replica_state(self.rse4recovery_id, self.internal_scope,
                                     path.basename(self.tmp_file3),
                                     ReplicaState.UNAVAILABLE)
            if replica['name'] == path.basename(self.tmp_file4):
                print("Updating replica state as unavailable: " +
                      replica['rses'][self.rse4recovery_id][0])
                update_replica_state(self.rse4recovery_id, self.internal_scope,
                                     path.basename(self.tmp_file4),
                                     ReplicaState.UNAVAILABLE)
            if replica['name'] == path.basename(self.tmp_file5):
                print("Updating replica state as unavailable: " +
                      replica['rses'][self.rse4recovery_id][0])
                update_replica_state(self.rse4recovery_id, self.internal_scope,
                                     path.basename(self.tmp_file5),
                                     ReplicaState.UNAVAILABLE)

        # Gather replica info after setting initial replica statuses
        replicalist = list_replicas(dids=self.listdids)

        # Checking if the status changes were effective
        for replica in replicalist:
            if replica['name'] == path.basename(self.tmp_file1):
                assert replica['states'][self.rse4suspicious_id] == 'AVAILABLE'
                assert replica['states'][self.rse4recovery_id] == 'AVAILABLE'
            if replica['name'] == path.basename(self.tmp_file2):
                assert (self.rse4suspicious_id in replica['states']) is False
                assert replica['states'][self.rse4recovery_id] == 'AVAILABLE'
            if replica['name'] == path.basename(self.tmp_file3):
                assert replica['states'][self.rse4suspicious_id] == 'AVAILABLE'
                assert (self.rse4recovery_id in replica['states']) is False
            if replica['name'] == path.basename(self.tmp_file4):
                assert replica['states'][self.rse4suspicious_id] == 'AVAILABLE'
                assert (self.rse4recovery_id in replica['states']) is False
            if replica['name'] == path.basename(self.tmp_file5):
                assert replica['states'][self.rse4suspicious_id] == 'AVAILABLE'
                assert (self.rse4recovery_id in replica['states']) is False

        # Checking if only self.tmp_file2 is declared as 'BAD'
        self.from_date = datetime.now() - timedelta(days=1)
        bad_replicas_list = list_bad_replicas_status(
            rse_id=self.rse4suspicious_id,
            younger_than=self.from_date,
            **self.vo)
        bad_checklist = [(badf['name'], badf['rse_id'], badf['state'])
                         for badf in bad_replicas_list]

        assert (path.basename(self.tmp_file1), self.rse4suspicious_id,
                BadFilesStatus.BAD) not in bad_checklist
        assert (path.basename(self.tmp_file2), self.rse4suspicious_id,
                BadFilesStatus.BAD) in bad_checklist
        assert (path.basename(self.tmp_file3), self.rse4suspicious_id,
                BadFilesStatus.BAD) not in bad_checklist
        assert (path.basename(self.tmp_file4), self.rse4suspicious_id,
                BadFilesStatus.BAD) not in bad_checklist
        assert (path.basename(self.tmp_file5), self.rse4suspicious_id,
                BadFilesStatus.BAD) not in bad_checklist

        bad_replicas_list = list_bad_replicas_status(
            rse_id=self.rse4recovery_id,
            younger_than=self.from_date,
            **self.vo)
        bad_checklist = [(badf['name'], badf['rse_id'], badf['state'])
                         for badf in bad_replicas_list]

        assert (path.basename(self.tmp_file1), self.rse4recovery_id,
                BadFilesStatus.BAD) not in bad_checklist
        assert (path.basename(self.tmp_file2), self.rse4recovery_id,
                BadFilesStatus.BAD) not in bad_checklist
        assert (path.basename(self.tmp_file3), self.rse4recovery_id,
                BadFilesStatus.BAD) not in bad_checklist
        assert (path.basename(self.tmp_file4), self.rse4recovery_id,
                BadFilesStatus.BAD) not in bad_checklist
        assert (path.basename(self.tmp_file5), self.rse4recovery_id,
                BadFilesStatus.BAD) not in bad_checklist
Code Example #10
def test_globus(rse_factory, did_factory, root_account):
    """
    Test bulk submissions with globus transfertool.
    Relies on mocks, because the tests do not contact a real Globus server.
    """
    # +------+    +------+
    # |      |    |      |
    # | RSE1 +--->| RSE2 |
    # |      |    |      |
    # +------+    +------+
    #
    # +------+    +------+
    # |      |    |      |
    # | RSE3 +--->| RSE4 |
    # |      |    |      |
    # +------+    +------+
    rse1, rse1_id = rse_factory.make_posix_rse()
    rse2, rse2_id = rse_factory.make_posix_rse()
    rse3, rse3_id = rse_factory.make_posix_rse()
    rse4, rse4_id = rse_factory.make_posix_rse()
    all_rses = [rse1_id, rse2_id, rse3_id, rse4_id]

    distance_core.add_distance(rse1_id, rse2_id, ranking=10)
    distance_core.add_distance(rse3_id, rse4_id, ranking=10)
    for rse_id in all_rses:
        rse_core.add_rse_attribute(rse_id, 'globus_endpoint_id', rse_id)

    # Single submission
    did1 = did_factory.upload_test_file(rse1)
    rule_core.add_rule(dids=[did1],
                       account=root_account,
                       copies=1,
                       rse_expression=rse2,
                       grouping='ALL',
                       weight=None,
                       lifetime=None,
                       locked=False,
                       subscription_id=None)
    did2 = did_factory.upload_test_file(rse3)
    rule_core.add_rule(dids=[did2],
                       account=root_account,
                       copies=1,
                       rse_expression=rse4,
                       grouping='ALL',
                       weight=None,
                       lifetime=None,
                       locked=False,
                       subscription_id=None)
    with patch(
            'rucio.transfertool.globus.bulk_submit_xfer') as mock_bulk_submit:
        mock_bulk_submit.return_value = 0
        submitter(once=True,
                  rses=[{
                      'id': rse_id
                  } for rse_id in all_rses],
                  group_bulk=10,
                  partition_wait_time=None,
                  transfertool='globus',
                  transfertype='single',
                  filter_transfertool=None)
        # Called separately for each job
        assert len(mock_bulk_submit.call_args_list) == 2
        (submitjob, ), _kwargs = mock_bulk_submit.call_args_list[0]
        assert len(submitjob) == 1

    # Bulk submission
    did1 = did_factory.upload_test_file(rse1)
    rule_core.add_rule(dids=[did1],
                       account=root_account,
                       copies=1,
                       rse_expression=rse2,
                       grouping='ALL',
                       weight=None,
                       lifetime=None,
                       locked=False,
                       subscription_id=None)
    did2 = did_factory.upload_test_file(rse3)
    rule_core.add_rule(dids=[did2],
                       account=root_account,
                       copies=1,
                       rse_expression=rse4,
                       grouping='ALL',
                       weight=None,
                       lifetime=None,
                       locked=False,
                       subscription_id=None)
    with patch(
            'rucio.transfertool.globus.bulk_submit_xfer') as mock_bulk_submit:
        mock_bulk_submit.return_value = 0
        submitter(once=True,
                  rses=[{
                      'id': rse_id
                  } for rse_id in all_rses],
                  group_bulk=10,
                  partition_wait_time=None,
                  transfertool='globus',
                  transfertype='bulk',
                  filter_transfertool=None)

        mock_bulk_submit.assert_called_once()
        (submitjob, ), _kwargs = mock_bulk_submit.call_args_list[0]

        # both jobs were grouped together and submitted in one call
        assert len(submitjob) == 2

        job_did1 = next(
            iter(
                filter(lambda job: did1['name'] in job['sources'][0],
                       submitjob)))
        assert len(job_did1['sources']) == 1
        assert len(job_did1['destinations']) == 1
        assert job_did1['metadata']['src_rse'] == rse1
        assert job_did1['metadata']['dst_rse'] == rse2
        assert job_did1['metadata']['name'] == did1['name']
        assert job_did1['metadata']['source_globus_endpoint_id'] == rse1_id
        assert job_did1['metadata']['dest_globus_endpoint_id'] == rse2_id

        job_did2 = next(
            iter(
                filter(lambda job: did2['name'] in job['sources'][0],
                       submitjob)))
        assert len(job_did2['sources']) == 1
        assert len(job_did2['destinations']) == 1
        assert job_did2['metadata']['src_rse'] == rse3
        assert job_did2['metadata']['dst_rse'] == rse4
        assert job_did2['metadata']['name'] == did2['name']
    request = request_core.get_request_by_did(rse_id=rse2_id, **did1)
    assert request['state'] == RequestState.SUBMITTED
    request = request_core.get_request_by_did(rse_id=rse4_id, **did2)
    assert request['state'] == RequestState.SUBMITTED
Code Example #11
def test_multihop_sources_created(rse_factory, did_factory, root_account,
                                  core_config_mock, caches_mock, metrics_mock):
    """
    Ensure that multihop transfers are handled and that the intermediate requests are correctly created
    """
    src_rse_name, src_rse_id = rse_factory.make_posix_rse()
    _, jump_rse1_id = rse_factory.make_posix_rse()
    _, jump_rse2_id = rse_factory.make_posix_rse()
    _, jump_rse3_id = rse_factory.make_posix_rse()
    dst_rse_name, dst_rse_id = rse_factory.make_posix_rse()

    jump_rses = [jump_rse1_id, jump_rse2_id, jump_rse3_id]
    all_rses = jump_rses + [src_rse_id, dst_rse_id]

    for rse_id in jump_rses:
        rse_core.add_rse_attribute(rse_id, 'available_for_multihop', True)

    rse_tombstone_delay = 3600
    rse_multihop_tombstone_delay = 12 * 3600
    default_multihop_tombstone_delay = 24 * 3600

    # if both attributes are set, the multihop one will take precedence
    rse_core.add_rse_attribute(jump_rse1_id, 'tombstone_delay',
                               rse_tombstone_delay)
    rse_core.add_rse_attribute(jump_rse1_id, 'multihop_tombstone_delay',
                               rse_multihop_tombstone_delay)

    # if the multihop delay is not set, the default multihop tombstone delay takes precedence, not the normal tombstone delay
    rse_core.add_rse_attribute(jump_rse2_id, 'tombstone_delay',
                               rse_tombstone_delay)
    core_config.set(section='transfers',
                    option='multihop_tombstone_delay',
                    value=default_multihop_tombstone_delay)

    # if multihop delay is set to 0, the replica will have no tombstone
    rse_core.add_rse_attribute(jump_rse3_id, 'multihop_tombstone_delay', 0)

    distance_core.add_distance(src_rse_id, jump_rse1_id, ranking=10)
    distance_core.add_distance(jump_rse1_id, jump_rse2_id, ranking=10)
    distance_core.add_distance(jump_rse2_id, jump_rse3_id, ranking=10)
    distance_core.add_distance(jump_rse3_id, dst_rse_id, ranking=10)

    did = did_factory.upload_test_file(src_rse_name)
    rule_core.add_rule(dids=[did],
                       account=root_account,
                       copies=1,
                       rse_expression=dst_rse_name,
                       grouping='ALL',
                       weight=None,
                       lifetime=None,
                       locked=False,
                       subscription_id=None)

    submitter(once=True,
              rses=[{
                  'id': rse_id
              } for rse_id in all_rses],
              partition_wait_time=None,
              transfertool='mock',
              transfertype='single',
              filter_transfertool=None)

    # Ensure that each intermediate request was correctly created
    for rse_id in jump_rses:
        assert request_core.get_request_by_did(rse_id=rse_id, **did)

    @read_session
    def __ensure_source_exists(rse_id, scope, name, session=None):
        return session.query(Source). \
            filter(Source.rse_id == rse_id). \
            filter(Source.scope == scope). \
            filter(Source.name == name). \
            one()

    # Ensure that sources were created for the transfers
    for rse_id in jump_rses + [src_rse_id]:
        __ensure_source_exists(rse_id, **did)

    # Ensure the tombstone is correctly set on intermediate replicas
    expected_tombstone = datetime.utcnow() + timedelta(
        seconds=rse_multihop_tombstone_delay)
    replica = replica_core.get_replica(jump_rse1_id, **did)
    assert expected_tombstone - timedelta(minutes=5) < replica[
        'tombstone'] < expected_tombstone + timedelta(minutes=5)

    expected_tombstone = datetime.utcnow() + timedelta(
        seconds=default_multihop_tombstone_delay)
    replica = replica_core.get_replica(jump_rse2_id, **did)
    assert expected_tombstone - timedelta(minutes=5) < replica[
        'tombstone'] < expected_tombstone + timedelta(minutes=5)

    replica = replica_core.get_replica(jump_rse3_id, **did)
    assert replica['tombstone'] is None

    # Ensure that Prometheus metrics were correctly registered. Only one submission is expected: the mock transfertool groups everything into one job.
    assert metrics_mock.get_sample_value(
        'rucio_core_request_submit_transfer_total') == 1
Code Example #12
    def setUpClass(cls):
        # Add test RSE
        cls.rse1 = 'MOCK'
        cls.rse3 = 'MOCK3'
        cls.rse4 = 'MOCK4'
        cls.rse5 = 'MOCK5'

        cls.rse1_id = get_rse(cls.rse1).id
        cls.rse3_id = get_rse(cls.rse3).id
        cls.rse4_id = get_rse(cls.rse4).id
        cls.rse5_id = get_rse(cls.rse5).id

        # Add Tags
        cls.T1 = tag_generator()
        cls.T2 = tag_generator()
        add_rse_attribute(cls.rse1, cls.T1, True)
        add_rse_attribute(cls.rse3, cls.T1, True)
        add_rse_attribute(cls.rse4, cls.T2, True)
        add_rse_attribute(cls.rse5, cls.T1, True)

        # Add fake weights
        add_rse_attribute(cls.rse1, "fakeweight", 10)
        add_rse_attribute(cls.rse3, "fakeweight", 0)
        add_rse_attribute(cls.rse4, "fakeweight", 0)
        add_rse_attribute(cls.rse5, "fakeweight", 0)

        # Add quota
        set_account_limit('jdoe', cls.rse1_id, -1)
        set_account_limit('jdoe', cls.rse3_id, -1)
        set_account_limit('jdoe', cls.rse4_id, -1)
        set_account_limit('jdoe', cls.rse5_id, -1)

        set_account_limit('root', cls.rse1_id, -1)
        set_account_limit('root', cls.rse3_id, -1)
        set_account_limit('root', cls.rse4_id, -1)
        set_account_limit('root', cls.rse5_id, -1)
Code Example #13
File: test_judge_evaluator.py  Project: rcarpa/rucio
    def setUpClass(cls):
        if config_get_bool('common',
                           'multi_vo',
                           raise_exception=False,
                           default=False):
            cls.vo = {'vo': get_vo()}
        else:
            cls.vo = {}

        @transactional_session
        def __cleanup_updated_dids(session=None):
            session.query(UpdatedDID).delete()

        __cleanup_updated_dids()

        # Add test RSE
        cls.rse1 = 'MOCK'
        cls.rse3 = 'MOCK3'
        cls.rse4 = 'MOCK4'
        cls.rse5 = 'MOCK5'

        cls.rse1_id = get_rse_id(rse=cls.rse1, **cls.vo)
        cls.rse3_id = get_rse_id(rse=cls.rse3, **cls.vo)
        cls.rse4_id = get_rse_id(rse=cls.rse4, **cls.vo)
        cls.rse5_id = get_rse_id(rse=cls.rse5, **cls.vo)

        # Add Tags
        cls.T1 = tag_generator()
        cls.T2 = tag_generator()
        add_rse_attribute(cls.rse1_id, cls.T1, True)
        add_rse_attribute(cls.rse3_id, cls.T1, True)
        add_rse_attribute(cls.rse4_id, cls.T2, True)
        add_rse_attribute(cls.rse5_id, cls.T1, True)

        # Add fake weights
        add_rse_attribute(cls.rse1_id, "fakeweight", 10)
        add_rse_attribute(cls.rse3_id, "fakeweight", 0)
        add_rse_attribute(cls.rse4_id, "fakeweight", 0)
        add_rse_attribute(cls.rse5_id, "fakeweight", 0)

        # Add quota
        cls.jdoe = InternalAccount('jdoe', **cls.vo)
        cls.root = InternalAccount('root', **cls.vo)
        set_local_account_limit(cls.jdoe, cls.rse1_id, -1)
        set_local_account_limit(cls.jdoe, cls.rse3_id, -1)
        set_local_account_limit(cls.jdoe, cls.rse4_id, -1)
        set_local_account_limit(cls.jdoe, cls.rse5_id, -1)

        set_local_account_limit(cls.root, cls.rse1_id, -1)
        set_local_account_limit(cls.root, cls.rse3_id, -1)
        set_local_account_limit(cls.root, cls.rse4_id, -1)
        set_local_account_limit(cls.root, cls.rse5_id, -1)
Code Example #14
File: test_s3.py  Project: rcarpa/rucio
    def setUp(self):
        if config_get_bool('common',
                           'multi_vo',
                           raise_exception=False,
                           default=False):
            self.vo = {'vo': get_vo()}
        else:
            self.vo = {}

        self.root = InternalAccount('root', **self.vo)

        # add an S3 storage with a replica
        self.rc = client.ReplicaClient()
        self.rses3 = rse_name_generator()
        self.rses3_id = add_rse(self.rses3, **self.vo)
        add_protocol(
            self.rses3_id, {
                'scheme': 'https',
                'hostname': 'fake-rucio.s3-eu-south-8.amazonaws.com',
                'port': 443,
                'prefix': '/',
                'impl': 'rucio.rse.protocols.gfal.NoRename',
                'domains': {
                    'lan': {
                        'read': 1,
                        'write': 1,
                        'delete': 1
                    },
                    'wan': {
                        'read': 1,
                        'write': 1,
                        'delete': 1,
                        'third_party_copy': 1
                    }
                }
            })
        add_rse_attribute(rse_id=self.rses3_id, key='sign_url', value='s3')
        add_rse_attribute(rse_id=self.rses3_id, key='fts', value='localhost')
        self.files3 = [{
            'scope': InternalScope('mock', **self.vo),
            'name': 'file-on-aws',
            'bytes': 1234,
            'adler32': 'deadbeef',
            'meta': {
                'events': 123
            }
        }]
        add_replicas(rse_id=self.rses3_id,
                     files=self.files3,
                     account=self.root)

        # add a non-S3 storage with a replica
        self.rsenons3 = rse_name_generator()
        self.rsenons3_id = add_rse(self.rsenons3, **self.vo)
        add_protocol(
            self.rsenons3_id, {
                'scheme': 'https',
                'hostname': 'somestorage.ch',
                'port': 1094,
                'prefix': '/my/prefix',
                'impl': 'rucio.rse.protocols.gfal.Default',
                'domains': {
                    'lan': {
                        'read': 1,
                        'write': 1,
                        'delete': 1
                    },
                    'wan': {
                        'read': 1,
                        'write': 1,
                        'delete': 1,
                        'third_party_copy': 1
                    }
                }
            })
        add_rse_attribute(rse_id=self.rsenons3_id,
                          key='fts',
                          value='localhost')
        self.filenons3 = [{
            'scope': InternalScope('mock', **self.vo),
            'name': 'file-on-storage',
            'bytes': 1234,
            'adler32': 'deadbeef',
            'meta': {
                'events': 321
            }
        }]
        add_replicas(rse_id=self.rsenons3_id,
                     files=self.filenons3,
                     account=self.root)

        # set the distance both ways
        add_distance(self.rses3_id,
                     self.rsenons3_id,
                     ranking=1,
                     agis_distance=1,
                     geoip_distance=1)
        add_distance(self.rsenons3_id,
                     self.rses3_id,
                     ranking=1,
                     agis_distance=1,
                     geoip_distance=1)
Code Example #15
    def setUpClass(cls):
        if config_get_bool('common',
                           'multi_vo',
                           raise_exception=False,
                           default=False):
            cls.vo = {'vo': 'tst'}
        else:
            cls.vo = {}

        # Add test RSE
        cls.rse1 = 'MOCK'
        cls.rse3 = 'MOCK3'
        cls.rse4 = 'MOCK4'
        cls.rse5 = 'MOCK5'

        cls.rse1_id = get_rse_id(rse=cls.rse1, **cls.vo)
        cls.rse3_id = get_rse_id(rse=cls.rse3, **cls.vo)
        cls.rse4_id = get_rse_id(rse=cls.rse4, **cls.vo)
        cls.rse5_id = get_rse_id(rse=cls.rse5, **cls.vo)

        # Add Tags
        cls.T1 = tag_generator()
        cls.T2 = tag_generator()
        add_rse_attribute(cls.rse1_id, cls.T1, True)
        add_rse_attribute(cls.rse3_id, cls.T1, True)
        add_rse_attribute(cls.rse4_id, cls.T2, True)
        add_rse_attribute(cls.rse5_id, cls.T1, True)

        # Add fake weights
        add_rse_attribute(cls.rse1_id, "fakeweight", 10)
        add_rse_attribute(cls.rse3_id, "fakeweight", 0)
        add_rse_attribute(cls.rse4_id, "fakeweight", 0)
        add_rse_attribute(cls.rse5_id, "fakeweight", 0)

        # Add quota
        cls.jdoe = InternalAccount('jdoe', **cls.vo)
        cls.root = InternalAccount('root', **cls.vo)
        set_account_limit(cls.jdoe, cls.rse1_id, -1)
        set_account_limit(cls.jdoe, cls.rse3_id, -1)
        set_account_limit(cls.jdoe, cls.rse4_id, -1)
        set_account_limit(cls.jdoe, cls.rse5_id, -1)

        set_account_limit(cls.root, cls.rse1_id, -1)
        set_account_limit(cls.root, cls.rse3_id, -1)
        set_account_limit(cls.root, cls.rse4_id, -1)
        set_account_limit(cls.root, cls.rse5_id, -1)
Code Example #16
    def setup(self):

        self.rc = ReplicaClient()

        self.client_location_without_proxy = {'ip': '192.168.0.1',
                                              'fqdn': 'anomalous-materials.blackmesa.com',
                                              'site': 'BLACKMESA1'}
        self.rse_without_proxy = rse_name_generator()
        self.rse_without_proxy_id = add_rse(self.rse_without_proxy)
        add_rse_attribute(rse_id=self.rse_without_proxy_id,
                          key='site',
                          value='BLACKMESA1')

        self.client_location_with_proxy = {'ip': '10.0.1.1',
                                           'fqdn': 'test-chamber.aperture.com',
                                           'site': 'APERTURE1'}
        self.rse_with_proxy = rse_name_generator()
        self.rse_with_proxy_id = add_rse(self.rse_with_proxy)
        add_rse_attribute(rse_id=self.rse_with_proxy_id,
                          key='site',
                          value='APERTURE1')

        # APERTURE1 site has an internal proxy
        config_set('root-proxy-internal', 'APERTURE1', 'proxy.aperture.com:1094')

        self.files = [{'scope': 'mock',
                       'name': 'half-life_%s' % i,
                       'bytes': 1234,
                       'adler32': 'deadbeef',
                       'meta': {'events': 666}} for i in range(1, 4)]
        for rse_id in [self.rse_with_proxy_id, self.rse_without_proxy_id]:
            add_replicas(rse_id=rse_id,
                         files=self.files,
                         account='root',
                         ignore_availability=True)

        add_protocol(self.rse_without_proxy_id, {'scheme': 'root',
                                                 'hostname': 'root.blackmesa.com',
                                                 'port': 1409,
                                                 'prefix': '//training/facility/',
                                                 'impl': 'rucio.rse.protocols.xrootd.Default',
                                                 'domains': {
                                                     'lan': {'read': 1,
                                                             'write': 1,
                                                             'delete': 1},
                                                     'wan': {'read': 1,
                                                             'write': 1,
                                                             'delete': 1}}})

        add_protocol(self.rse_with_proxy_id, {'scheme': 'root',
                                              'hostname': 'root.aperture.com',
                                              'port': 1409,
                                              'prefix': '//test/chamber/',
                                              'impl': 'rucio.rse.protocols.xrootd.Default',
                                              'domains': {
                                                  'lan': {'read': 1,
                                                          'write': 1,
                                                          'delete': 1},
                                                  'wan': {'read': 1,
                                                          'write': 1,
                                                          'delete': 1}}})
Code Example #17
File: setup_data.py  Project: asketagarwal/rucio
    os.mkdir('/tmp/SITE2_DISK')
    os.mkdir('/tmp/SITE1_DISK')

    params = {'scheme': 'file',
              'prefix': '/tmp/SITE1_DISK/',
              'impl': 'rucio.rse.protocols.posix.Default',
              'domains': {"lan": {"read": 1,
                                  "write": 1,
                                  "delete": 1},
                          "wan": {"read": 1,
                                  "write": 1,
                                  "delete": 1}}}

    add_rse('SITE1_DISK', 'root')
    add_protocol('SITE1_DISK', params)
    add_rse_attribute(rse='SITE1_DISK', key='istape', value='False')

    params = {'scheme': 'file',
              'prefix': '/tmp/SITE2_DISK/',
              'impl': 'rucio.rse.protocols.posix.Default',
              'domains': {"lan": {"read": 1,
                                  "write": 1,
                                  "delete": 1},
                          "wan": {"read": 1,
                                  "write": 1,
                                  "delete": 1}}}

    add_rse('SITE2_DISK', 'root')
    add_protocol('SITE2_DISK', params)
    add_rse_attribute(rse='SITE2_DISK', key='istape', value='False')
Code Example #18
    def __init__(self):
        self.rse1 = rse_name_generator()
        self.rse2 = rse_name_generator()
        self.rse3 = rse_name_generator()
        self.rse4 = rse_name_generator()
        self.rse5 = rse_name_generator()

        self.rse1_id = rse.add_rse(self.rse1)
        self.rse2_id = rse.add_rse(self.rse2)
        self.rse3_id = rse.add_rse(self.rse3)
        self.rse4_id = rse.add_rse(self.rse4)
        self.rse5_id = rse.add_rse(self.rse5)

        # Add Attributes
        self.attribute = attribute_name_generator()

        rse.add_rse_attribute(self.rse1, self.attribute, "at")
        rse.add_rse_attribute(self.rse2, self.attribute, "de")
        rse.add_rse_attribute(self.rse3, self.attribute, "fr")
        rse.add_rse_attribute(self.rse4, self.attribute, "uk")
        rse.add_rse_attribute(self.rse5, self.attribute, "us")

        # Add numeric Attributes
        self.attribute_numeric = attribute_name_generator()

        rse.add_rse_attribute(self.rse1, self.attribute_numeric, 10)
        rse.add_rse_attribute(self.rse2, self.attribute_numeric, 20)
        rse.add_rse_attribute(self.rse3, self.attribute_numeric, 30)
        rse.add_rse_attribute(self.rse4, self.attribute_numeric, 40)
        rse.add_rse_attribute(self.rse5, self.attribute_numeric, 50)

        # Add Tags
        self.tag1 = tag_generator()
        self.tag2 = tag_generator()
        rse.add_rse_attribute(self.rse1, self.tag1, True)
        rse.add_rse_attribute(self.rse2, self.tag1, True)
        rse.add_rse_attribute(self.rse3, self.tag1, True)
        rse.add_rse_attribute(self.rse4, self.tag2, True)
        rse.add_rse_attribute(self.rse5, self.tag2, True)
Code Example #19
File: importer.py  Project: ijjorama/rucio
def import_rses(rses, vo='def', session=None):
    new_rses = []
    for rse_name in rses:
        rse = rses[rse_name]
        if isinstance(rse.get('rse_type'), string_types):
            rse['rse_type'] = RSEType.from_string(str(rse['rse_type']))
        try:
            rse_id = rse_module.get_rse_id(rse=rse_name,
                                           vo=vo,
                                           session=session)
        except RSENotFound:
            rse_id = rse_module.add_rse(rse=rse_name,
                                        vo=vo,
                                        deterministic=rse.get('deterministic'),
                                        volatile=rse.get('volatile'),
                                        city=rse.get('city'),
                                        region_code=rse.get('region_code'),
                                        country_name=rse.get('country_name'),
                                        staging_area=rse.get('staging_area'),
                                        continent=rse.get('continent'),
                                        time_zone=rse.get('time_zone'),
                                        ISP=rse.get('ISP'),
                                        rse_type=rse.get('rse_type'),
                                        latitude=rse.get('latitude'),
                                        longitude=rse.get('longitude'),
                                        ASN=rse.get('ASN'),
                                        availability=rse.get('availability'),
                                        session=session)
        else:
            rse_module.update_rse(rse_id=rse_id,
                                  parameters=rse,
                                  session=session)

        new_rses.append(rse_id)
        # Protocols
        new_protocols = rse.get('protocols')
        if new_protocols:
            # update existing, add missing, and remove leftover protocols
            old_protocols = [{
                'scheme': protocol['scheme'],
                'hostname': protocol['hostname'],
                'port': protocol['port']
            } for protocol in rse_module.get_rse_protocols(
                rse_id=rse_id, session=session)['protocols']]
            missing_protocols = [
                new_protocol for new_protocol in new_protocols if {
                    'scheme': new_protocol['scheme'],
                    'hostname': new_protocol['hostname'],
                    'port': new_protocol['port']
                } not in old_protocols
            ]
            outdated_protocols = [
                new_protocol for new_protocol in new_protocols if {
                    'scheme': new_protocol['scheme'],
                    'hostname': new_protocol['hostname'],
                    'port': new_protocol['port']
                } in old_protocols
            ]
            new_protocols = [{
                'scheme': protocol['scheme'],
                'hostname': protocol['hostname'],
                'port': protocol['port']
            } for protocol in new_protocols]
            to_be_removed_protocols = [
                old_protocol for old_protocol in old_protocols
                if old_protocol not in new_protocols
            ]
            for protocol in outdated_protocols:
                scheme = protocol['scheme']
                port = protocol['port']
                hostname = protocol['hostname']
                del protocol['scheme']
                del protocol['hostname']
                del protocol['port']
                rse_module.update_protocols(rse_id=rse_id,
                                            scheme=scheme,
                                            data=protocol,
                                            hostname=hostname,
                                            port=port,
                                            session=session)

            for protocol in missing_protocols:
                rse_module.add_protocol(rse_id=rse_id,
                                        parameter=protocol,
                                        session=session)

            for protocol in to_be_removed_protocols:
                scheme = protocol['scheme']
                port = protocol['port']
                hostname = protocol['hostname']
                rse_module.del_protocols(rse_id=rse_id,
                                         scheme=scheme,
                                         port=port,
                                         hostname=hostname,
                                         session=session)

        # Limits
        old_limits = rse_module.get_rse_limits(rse_id=rse_id, session=session)
        for limit_name in ['MaxBeingDeletedFiles', 'MinFreeSpace']:
            limit = rse.get(limit_name)
            if limit:
                if limit_name in old_limits:
                    rse_module.delete_rse_limit(rse_id=rse_id,
                                                name=limit_name,
                                                session=session)
                rse_module.set_rse_limits(rse_id=rse_id,
                                          name=limit_name,
                                          value=limit,
                                          session=session)

        # Attributes
        attributes = rse.get('attributes', {})
        attributes['lfn2pfn_algorithm'] = rse.get('lfn2pfn_algorithm')
        attributes['verify_checksum'] = rse.get('verify_checksum')

        old_attributes = rse_module.list_rse_attributes(rse_id=rse_id,
                                                        session=session)
        for attr in attributes:
            value = attributes[attr]
            if value is not None:
                if attr in old_attributes:
                    rse_module.del_rse_attribute(rse_id=rse_id,
                                                 key=attr,
                                                 session=session)
                rse_module.add_rse_attribute(rse_id=rse_id,
                                             key=attr,
                                             value=value,
                                             session=session)

    # set the deleted flag on RSEs that are missing from the import data
    old_rses = [
        old_rse['id'] for old_rse in rse_module.list_rses(session=session)
    ]
    for old_rse in old_rses:
        if old_rse not in new_rses:
            try:
                rse_module.del_rse(rse_id=old_rse, session=session)
            except RSEOperationNotSupported:
                pass
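
For reference, each per-RSE dictionary iterated over above bundles the RSE settings, protocols, limits and attributes in one place. A minimal sketch of such an input, with every name and value purely illustrative, could look like this:

rses = {
    'SITE_DATADISK': {
        'rse_type': 'DISK',
        'deterministic': True,
        'volatile': False,
        'availability': 7,
        'city': 'ExampleCity',
        'protocols': [{
            'scheme': 'root',
            'hostname': 'storage.example.org',
            'port': 1094,
            'prefix': '/data/',
            'impl': 'rucio.rse.protocols.xrootd.Default'
        }],
        'MinFreeSpace': 10000,
        'lfn2pfn_algorithm': 'hash',
        'verify_checksum': True,
        'attributes': {
            'attr1': 'value1'
        }
    }
}
# import_rses(rses, vo='def')  # adds the RSE when unknown, otherwise updates it in place
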
Code example #20
    def setup(self):
        # New RSE
        self.new_rse = rse_name_generator()

        # RSE 1 that already exists
        self.old_rse_1 = rse_name_generator()
        add_rse(self.old_rse_1, availability=1)
        add_protocol(
            self.old_rse_1, {
                'scheme': 'scheme1',
                'hostname': 'hostname1',
                'port': 1000,
                'impl': 'impl'
            })
        self.old_rse_id_1 = get_rse_id(self.old_rse_1)
        set_rse_limits(rse=self.old_rse_1, name='limit1', value='10')
        set_rse_transfer_limits(rse=self.old_rse_1,
                                activity='activity1',
                                max_transfers=10)
        add_rse_attribute(rse=self.old_rse_1, key='attr1', value='test10')

        # RSE 2 that already exists
        self.old_rse_2 = rse_name_generator()
        add_rse(self.old_rse_2)
        self.old_rse_id_2 = get_rse_id(self.old_rse_2)

        # RSE 3 that already exists
        self.old_rse_3 = rse_name_generator()
        add_rse(self.old_rse_3)
        self.old_rse_id_3 = get_rse_id(self.old_rse_3)

        # Distance that already exists
        add_distance(self.old_rse_id_1, self.old_rse_id_2)

        self.data1 = {
            'rses': [{
                'rse': self.new_rse,
                'rse_type': 'TAPE',
                'availability': 5,
                'city': 'NewCity',
                'protocols': {
                    'protocols': [{
                        'scheme': 'scheme',
                        'hostname': 'hostname',
                        'port': 1000,
                        'impl': 'impl'
                    }]
                },
                'limits': {
                    'limit1': 0
                },
                'transfer_limits': {
                    'activity1': {
                        'unknown_rse_id': {
                            'max_transfers': 1
                        }
                    }
                },
                'attributes': {
                    'attr1': 'test'
                }
            }, {
                'rse': self.old_rse_1,
                'protocols': {
                    'protocols': [{
                        'scheme': 'scheme1',
                        'hostname': 'hostname1',
                        'port': 1000,
                        'prefix': 'prefix',
                        'impl': 'impl1'
                    }, {
                        'scheme': 'scheme2',
                        'hostname': 'hostname2',
                        'port': 1001,
                        'impl': 'impl'
                    }]
                },
                'limits': {
                    'limit1': 0,
                    'limit2': 2
                },
                'transfer_limits': {
                    'activity1': {
                        self.old_rse_id_1: {
                            'max_transfers': 1
                        }
                    },
                    'activity2': {
                        self.old_rse_id_1: {
                            'max_transfers': 2
                        }
                    }
                },
                'attributes': {
                    'attr1': 'test1',
                    'attr2': 'test2'
                }
            }],
            'distances': {
                self.old_rse_1: {
                    self.old_rse_2: {
                        'src_rse_id': self.old_rse_id_1,
                        'dest_rse_id': self.old_rse_id_2,
                        'ranking': 10
                    },
                    self.old_rse_3: {
                        'src_rse_id': self.old_rse_id_1,
                        'dest_rse_id': self.old_rse_id_3,
                        'ranking': 4
                    }
                }
            }
        }
        self.data2 = {'rses': [{'rse': self.new_rse}]}
        self.data3 = {'distances': {}}
Code example #21
    def setUp(self):
        if config_get_bool('common',
                           'multi_vo',
                           raise_exception=False,
                           default=False):
            self.vo = {
                'vo':
                config_get('client',
                           'vo',
                           raise_exception=False,
                           default='tst')
            }
        else:
            self.vo = {}

        self.rse1 = rse_name_generator()
        self.rse2 = rse_name_generator()
        self.rse3 = rse_name_generator()
        self.rse4 = rse_name_generator()
        self.rse5 = rse_name_generator()

        self.rse1_id = rse.add_rse(self.rse1, **self.vo)
        self.rse2_id = rse.add_rse(self.rse2, **self.vo)
        self.rse3_id = rse.add_rse(self.rse3, **self.vo)
        self.rse4_id = rse.add_rse(self.rse4, **self.vo)
        self.rse5_id = rse.add_rse(self.rse5, **self.vo)

        # Add Attributes
        self.attribute = attribute_name_generator()

        rse.add_rse_attribute(self.rse1_id, self.attribute, "at")
        rse.add_rse_attribute(self.rse2_id, self.attribute, "de")
        rse.add_rse_attribute(self.rse3_id, self.attribute, "fr")
        rse.add_rse_attribute(self.rse4_id, self.attribute, "uk")
        rse.add_rse_attribute(self.rse5_id, self.attribute, "us")

        # Add Tags
        self.tag1 = tag_generator()
        self.tag2 = tag_generator()
        rse.add_rse_attribute(self.rse1_id, self.tag1, True)
        rse.add_rse_attribute(self.rse2_id, self.tag1, True)
        rse.add_rse_attribute(self.rse3_id, self.tag1, True)
        rse.add_rse_attribute(self.rse4_id, self.tag2, True)
        rse.add_rse_attribute(self.rse5_id, self.tag2, True)

        self.rse_client = RSEClient()
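
A hedged sketch of a test that could follow this setup, resolving the generated attribute and tag through the client-side expression interface (RSEClient.list_rses takes an RSE expression; the expected membership follows from the attributes added above):

    def test_list_rses_by_expression(self):
        # Hypothetical follow-up to the setUp above (not part of the original example).
        rses = [r['rse'] for r in self.rse_client.list_rses('%s=de' % self.attribute)]
        assert self.rse2 in rses

        # tag1 was attached to rse1, rse2 and rse3 above
        tagged = [r['rse'] for r in self.rse_client.list_rses(self.tag1)]
        assert sorted(tagged) == sorted([self.rse1, self.rse2, self.rse3])
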
Code example #22
    def test_replica_sorting(self):
        """ REPLICA (CORE): Test the correct sorting of the replicas across WAN and LAN """

        self.rc = ReplicaClient()

        self.rse1 = 'APERTURE_%s' % rse_name_generator()
        self.rse2 = 'BLACKMESA_%s' % rse_name_generator()
        self.rse1_id = add_rse(self.rse1)
        self.rse2_id = add_rse(self.rse2)
        add_rse_attribute(rse_id=self.rse1_id, key='site', value='APERTURE')
        add_rse_attribute(rse_id=self.rse2_id, key='site', value='BLACKMESA')

        self.files = [{'scope': InternalScope('mock'), 'name': 'element_0',
                       'bytes': 1234, 'adler32': 'deadbeef'}]
        root = InternalAccount('root')
        add_replicas(rse_id=self.rse1_id, files=self.files, account=root)
        add_replicas(rse_id=self.rse2_id, files=self.files, account=root)

        add_protocol(self.rse1_id, {'scheme': 'root',
                                    'hostname': 'root.aperture.com',
                                    'port': 1409,
                                    'prefix': '//test/chamber/',
                                    'impl': 'rucio.rse.protocols.xrootd.Default',
                                    'domains': {
                                        'lan': {'read': 1, 'write': 1, 'delete': 1},
                                        'wan': {'read': 1, 'write': 1, 'delete': 1}}})
        add_protocol(self.rse1_id, {'scheme': 'davs',
                                    'hostname': 'davs.aperture.com',
                                    'port': 443,
                                    'prefix': '/test/chamber/',
                                    'impl': 'rucio.rse.protocols.gfal.Default',
                                    'domains': {
                                        'lan': {'read': 2, 'write': 2, 'delete': 2},
                                        'wan': {'read': 2, 'write': 2, 'delete': 2}}})
        add_protocol(self.rse1_id, {'scheme': 'gsiftp',
                                    'hostname': 'gsiftp.aperture.com',
                                    'port': 8446,
                                    'prefix': '/test/chamber/',
                                    'impl': 'rucio.rse.protocols.gfal.Default',
                                    'domains': {
                                        'lan': {'read': 0, 'write': 0, 'delete': 0},
                                        'wan': {'read': 3, 'write': 3, 'delete': 3}}})

        add_protocol(self.rse2_id, {'scheme': 'gsiftp',
                                    'hostname': 'gsiftp.blackmesa.com',
                                    'port': 8446,
                                    'prefix': '/lambda/complex/',
                                    'impl': 'rucio.rse.protocols.gfal.Default',
                                    'domains': {
                                        'lan': {'read': 2, 'write': 2, 'delete': 2},
                                        'wan': {'read': 1, 'write': 1, 'delete': 1}}})
        add_protocol(self.rse2_id, {'scheme': 'davs',
                                    'hostname': 'davs.blackmesa.com',
                                    'port': 443,
                                    'prefix': '/lambda/complex/',
                                    'impl': 'rucio.rse.protocols.gfal.Default',
                                    'domains': {
                                        'lan': {'read': 0, 'write': 0, 'delete': 0},
                                        'wan': {'read': 2, 'write': 2, 'delete': 2}}})
        add_protocol(self.rse2_id, {'scheme': 'root',
                                    'hostname': 'root.blackmesa.com',
                                    'port': 1409,
                                    'prefix': '//lambda/complex/',
                                    'impl': 'rucio.rse.protocols.xrootd.Default',
                                    'domains': {
                                        'lan': {'read': 1, 'write': 1, 'delete': 1},
                                        'wan': {'read': 3, 'write': 3, 'delete': 3}}})

        replicas = [r for r in self.rc.list_replicas(dids=[{'scope': 'mock',
                                                            'name': f['name'],
                                                            'type': 'FILE'} for f in self.files],
                                                     schemes=['root', 'gsiftp', 'davs'],
                                                     client_location={'site': 'APERTURE'})]
        pfns = [r['pfns'] for r in replicas][0]
        assert_equal(len(pfns.keys()), 5)
        assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['domain'], 'lan')
        assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['priority'], 1)
        assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['domain'], 'lan')
        assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['priority'], 2)
        assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
        assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['priority'], 3)
        assert_equal(pfns['davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
        assert_equal(pfns['davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0']['priority'], 4)
        assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
        assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['priority'], 5)

        replicas = [r for r in self.rc.list_replicas(dids=[{'scope': 'mock',
                                                            'name': f['name'],
                                                            'type': 'FILE'} for f in self.files],
                                                     schemes=['root', 'gsiftp', 'davs'],
                                                     client_location={'site': 'BLACKMESA'})]
        pfns = [r['pfns'] for r in replicas][0]
        assert_equal(len(pfns.keys()), 5)
        assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['domain'], 'lan')
        assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['priority'], 1)
        assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['domain'], 'lan')
        assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['priority'], 2)
        assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['domain'], 'wan')
        assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['priority'], 3)
        assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['domain'], 'wan')
        assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['priority'], 4)
        assert_equal(pfns['gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0']['domain'], 'wan')
        assert_equal(pfns['gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0']['priority'], 5)

        replicas = [r for r in self.rc.list_replicas(dids=[{'scope': 'mock',
                                                            'name': f['name'],
                                                            'type': 'FILE'} for f in self.files],
                                                     schemes=['root', 'gsiftp', 'davs'],
                                                     client_location={'site': 'XEN'})]
        pfns = [r['pfns'] for r in replicas][0]
        assert_equal(len(pfns.keys()), 6)
        # TODO: intractable until RSE sorting is enabled
        assert_equal(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
        assert_in(pfns['gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0']['priority'], [1, 2])
        assert_equal(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['domain'], 'wan')
        assert_in(pfns['root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0']['priority'], [1, 2])
        assert_equal(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['domain'], 'wan')
        assert_in(pfns['davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0']['priority'], [3, 4])
        assert_equal(pfns['davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
        assert_in(pfns['davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0']['priority'], [3, 4])
        assert_equal(pfns['gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0']['domain'], 'wan')
        assert_in(pfns['gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0']['priority'], [5, 6])
        assert_equal(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['domain'], 'wan')
        assert_in(pfns['root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0']['priority'], [5, 6])

        ml = self.rc.list_replicas(dids=[{'scope': 'mock',
                                          'name': f['name'],
                                          'type': 'FILE'} for f in self.files],
                                   schemes=['root', 'gsiftp', 'davs'],
                                   metalink=True,
                                   client_location={'site': 'APERTURE'})
        assert_in('domain="lan" priority="1" client_extract="false">root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0', ml)
        assert_in('domain="lan" priority="2" client_extract="false">davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0', ml)
        assert_in('domain="wan" priority="3" client_extract="false">gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0', ml)
        assert_in('domain="wan" priority="4" client_extract="false">davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0', ml)
        assert_in('domain="wan" priority="5" client_extract="false">root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0', ml)
        assert_not_in('priority="6"', ml)

        ml = self.rc.list_replicas(dids=[{'scope': 'mock',
                                          'name': f['name'],
                                          'type': 'FILE'} for f in self.files],
                                   schemes=['root', 'gsiftp', 'davs'],
                                   metalink=True,
                                   client_location={'site': 'BLACKMESA'})
        assert_in('domain="lan" priority="1" client_extract="false">root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0', ml)
        assert_in('domain="lan" priority="2" client_extract="false">gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0', ml)
        assert_in('domain="wan" priority="3" client_extract="false">root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0', ml)
        assert_in('domain="wan" priority="4" client_extract="false">davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0', ml)
        assert_in('domain="wan" priority="5" client_extract="false">gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0', ml)
        assert_not_in('priority="6"', ml)

        # TODO: intractable until RSE sorting is enabled
        # ml = self.rc.list_replicas(dids=[{'scope': 'mock',
        #                                   'name': f['name'],
        #                                   'type': 'FILE'} for f in self.files],
        #                            schemes=['root', 'gsiftp', 'davs'],
        #                            metalink=True,
        #                            client_location={'site': 'XEN'})
        # assert_in('domain="wan" priority="1">root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0', ml)
        # assert_in('domain="wan" priority="2">gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0', ml)
        # assert_in('domain="wan" priority="3">davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0', ml)
        # assert_in('domain="wan" priority="4">davs://davs.blackmesa.com:443/lambda/complex/mock/58/b5/element_0', ml)
        # assert_in('domain="wan" priority="5">gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0', ml)
        # assert_in('domain="wan" priority="6">root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0', ml)
        # assert_not_in('priority="7"', ml)

        # ensure correct handling of disabled protocols
        add_protocol(self.rse1_id, {'scheme': 'root',
                                    'hostname': 'root2.aperture.com',
                                    'port': 1409,
                                    'prefix': '//test/chamber/',
                                    'impl': 'rucio.rse.protocols.xrootd.Default',
                                    'domains': {
                                        'lan': {'read': 1, 'write': 1, 'delete': 1},
                                        'wan': {'read': 0, 'write': 0, 'delete': 0}}})

        ml = self.rc.list_replicas(dids=[{'scope': 'mock',
                                          'name': f['name'],
                                          'type': 'FILE'} for f in self.files],
                                   schemes=['root', 'gsiftp', 'davs'],
                                   metalink=True,
                                   client_location={'site': 'BLACKMESA'})
        assert_in('domain="lan" priority="1" client_extract="false">root://root.blackmesa.com:1409//lambda/complex/mock/58/b5/element_0', ml)
        assert_in('domain="lan" priority="2" client_extract="false">gsiftp://gsiftp.blackmesa.com:8446/lambda/complex/mock/58/b5/element_0', ml)
        assert_in('domain="wan" priority="3" client_extract="false">root://root.aperture.com:1409//test/chamber/mock/58/b5/element_0', ml)
        assert_in('domain="wan" priority="4" client_extract="false">davs://davs.aperture.com:443/test/chamber/mock/58/b5/element_0', ml)
        assert_in('domain="wan" priority="5" client_extract="false">gsiftp://gsiftp.aperture.com:8446/test/chamber/mock/58/b5/element_0', ml)
        assert_not_in('priority="6"', ml)

        delete_replicas(rse_id=self.rse1_id, files=self.files)
        delete_replicas(rse_id=self.rse2_id, files=self.files)
        del_rse(self.rse1_id)
        del_rse(self.rse2_id)
Code example #23
    def setUp(self):
        if config_get_bool('common',
                           'multi_vo',
                           raise_exception=False,
                           default=False):
            self.vo = {
                'vo':
                config_get('client',
                           'vo',
                           raise_exception=False,
                           default='tst')
            }
            self.filter = {'filter': self.vo}
        else:
            self.vo = {}
            self.filter = {'filter': {'vo': 'def'}}

        self.rse1 = rse_name_generator()
        self.rse2 = rse_name_generator()
        self.rse3 = rse_name_generator()
        self.rse4 = rse_name_generator()
        self.rse5 = rse_name_generator()

        self.rse1_id = rse.add_rse(self.rse1, **self.vo)
        self.rse2_id = rse.add_rse(self.rse2, **self.vo)
        self.rse3_id = rse.add_rse(self.rse3, **self.vo)
        self.rse4_id = rse.add_rse(self.rse4, **self.vo)
        self.rse5_id = rse.add_rse(self.rse5, **self.vo)

        # Add Attributes
        self.attribute = attribute_name_generator()

        rse.add_rse_attribute(self.rse1_id, self.attribute, "at")
        rse.add_rse_attribute(self.rse2_id, self.attribute, "de")
        rse.add_rse_attribute(self.rse3_id, self.attribute, "fr")
        rse.add_rse_attribute(self.rse4_id, self.attribute, "uk")
        rse.add_rse_attribute(self.rse5_id, self.attribute, "us")

        # Add numeric Attributes
        self.attribute_numeric = attribute_name_generator()

        rse.add_rse_attribute(self.rse1_id, self.attribute_numeric, 10)
        rse.add_rse_attribute(self.rse2_id, self.attribute_numeric, 20)
        rse.add_rse_attribute(self.rse3_id, self.attribute_numeric, 30)
        rse.add_rse_attribute(self.rse4_id, self.attribute_numeric, 40)
        rse.add_rse_attribute(self.rse5_id, self.attribute_numeric, 50)

        # Add Tags
        self.tag1 = tag_generator()
        self.tag2 = tag_generator()
        rse.add_rse_attribute(self.rse1_id, self.tag1, True)
        rse.add_rse_attribute(self.rse2_id, self.tag1, True)
        rse.add_rse_attribute(self.rse3_id, self.tag1, True)
        rse.add_rse_attribute(self.rse4_id, self.tag2, True)
        rse.add_rse_attribute(self.rse5_id, self.tag2, True)
Code example #24
File: importer.py  Project: wyang007/rucio
def import_rses(rses,
                rse_sync_method='edit',
                attr_sync_method='edit',
                protocol_sync_method='edit',
                vo='def',
                session=None):
    new_rses = []
    for rse_name in rses:
        rse = rses[rse_name]
        if isinstance(rse.get('rse_type'), string_types):
            rse['rse_type'] = RSEType(rse['rse_type'])

        if rse_module.rse_exists(rse_name,
                                 vo=vo,
                                 include_deleted=False,
                                 session=session):
            # RSE exists and is active
            rse_id = rse_module.get_rse_id(rse=rse_name,
                                           vo=vo,
                                           session=session)
            rse_module.update_rse(rse_id=rse_id,
                                  parameters=rse,
                                  session=session)
        elif rse_module.rse_exists(rse_name,
                                   vo=vo,
                                   include_deleted=True,
                                   session=session):
            # RSE exists but in deleted state
            # Only modify the RSE if the importer is configured for edit or hard sync
            if rse_sync_method in ['edit', 'hard']:
                rse_id = rse_module.get_rse_id(rse=rse_name,
                                               vo=vo,
                                               include_deleted=True,
                                               session=session)
                rse_module.restore_rse(rse_id, session=session)
                rse_module.update_rse(rse_id=rse_id,
                                      parameters=rse,
                                      session=session)
            else:
                # Config is in RSE append-only mode; do not modify the disabled RSE
                continue
        else:
            rse_id = rse_module.add_rse(rse=rse_name,
                                        vo=vo,
                                        deterministic=rse.get('deterministic'),
                                        volatile=rse.get('volatile'),
                                        city=rse.get('city'),
                                        region_code=rse.get('region_code'),
                                        country_name=rse.get('country_name'),
                                        staging_area=rse.get('staging_area'),
                                        continent=rse.get('continent'),
                                        time_zone=rse.get('time_zone'),
                                        ISP=rse.get('ISP'),
                                        rse_type=rse.get('rse_type'),
                                        latitude=rse.get('latitude'),
                                        longitude=rse.get('longitude'),
                                        ASN=rse.get('ASN'),
                                        availability=rse.get('availability'),
                                        session=session)

        new_rses.append(rse_id)
        # Protocols
        new_protocols = rse.get('protocols')
        if new_protocols:
            # update existing, add missing, and remove leftover protocols
            old_protocols = [{
                'scheme': protocol['scheme'],
                'hostname': protocol['hostname'],
                'port': protocol['port']
            } for protocol in rse_module.get_rse_protocols(
                rse_id=rse_id, session=session)['protocols']]
            missing_protocols = [
                new_protocol for new_protocol in new_protocols if {
                    'scheme': new_protocol['scheme'],
                    'hostname': new_protocol['hostname'],
                    'port': new_protocol['port']
                } not in old_protocols
            ]
            outdated_protocols = [
                new_protocol for new_protocol in new_protocols if {
                    'scheme': new_protocol['scheme'],
                    'hostname': new_protocol['hostname'],
                    'port': new_protocol['port']
                } in old_protocols
            ]
            new_protocols = [{
                'scheme': protocol['scheme'],
                'hostname': protocol['hostname'],
                'port': protocol['port']
            } for protocol in new_protocols]
            to_be_removed_protocols = [
                old_protocol for old_protocol in old_protocols
                if old_protocol not in new_protocols
            ]

            if protocol_sync_method == 'append':
                outdated_protocols = []

            for protocol in outdated_protocols:
                scheme = protocol['scheme']
                port = protocol['port']
                hostname = protocol['hostname']
                del protocol['scheme']
                del protocol['hostname']
                del protocol['port']
                rse_module.update_protocols(rse_id=rse_id,
                                            scheme=scheme,
                                            data=protocol,
                                            hostname=hostname,
                                            port=port,
                                            session=session)

            for protocol in missing_protocols:
                rse_module.add_protocol(rse_id=rse_id,
                                        parameter=protocol,
                                        session=session)

            if protocol_sync_method == 'hard':
                for protocol in to_be_removed_protocols:
                    scheme = protocol['scheme']
                    port = protocol['port']
                    hostname = protocol['hostname']
                    rse_module.del_protocols(rse_id=rse_id,
                                             scheme=scheme,
                                             port=port,
                                             hostname=hostname,
                                             session=session)

        # Limits
        old_limits = rse_module.get_rse_limits(rse_id=rse_id, session=session)
        for limit_name in ['MaxBeingDeletedFiles', 'MinFreeSpace']:
            limit = rse.get(limit_name)
            if limit:
                if limit_name in old_limits:
                    rse_module.delete_rse_limits(rse_id=rse_id,
                                                 name=limit_name,
                                                 session=session)
                rse_module.set_rse_limits(rse_id=rse_id,
                                          name=limit_name,
                                          value=limit,
                                          session=session)

        # Attributes
        attributes = rse.get('attributes', {})
        attributes['lfn2pfn_algorithm'] = rse.get('lfn2pfn_algorithm')
        attributes['verify_checksum'] = rse.get('verify_checksum')

        old_attributes = rse_module.list_rse_attributes(rse_id=rse_id,
                                                        session=session)
        # attributes currently on the RSE but absent from the import data;
        # candidates for deletion when attr_sync_method is 'hard'
        missing_attributes = [
            attribute for attribute in old_attributes
            if attribute not in attributes
        ]

        for attr in attributes:
            value = attributes[attr]
            if value is not None:
                if attr in old_attributes:
                    if attr_sync_method not in ['append']:
                        rse_module.del_rse_attribute(rse_id=rse_id,
                                                     key=attr,
                                                     session=session)
                        rse_module.add_rse_attribute(rse_id=rse_id,
                                                     key=attr,
                                                     value=value,
                                                     session=session)
                else:
                    rse_module.add_rse_attribute(rse_id=rse_id,
                                                 key=attr,
                                                 value=value,
                                                 session=session)
        if attr_sync_method == 'hard':
            for attr in missing_attributes:
                if attr != rse_name:  # keep the implicit attribute that carries the RSE's own name
                    rse_module.del_rse_attribute(rse_id=rse_id,
                                                 key=attr,
                                                 session=session)

    # set the deleted flag on RSEs that are missing from the import data
    old_rses = [
        old_rse['id'] for old_rse in rse_module.list_rses(session=session)
    ]
    if rse_sync_method == 'hard':
        for old_rse in old_rses:
            if old_rse not in new_rses:
                try:
                    rse_module.del_rse(rse_id=old_rse, session=session)
                except RSEOperationNotSupported:
                    pass
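
Compared to the earlier importer variant, this one takes three *_sync_method arguments whose effect is visible in the branches above. A hedged calling sketch, with the mode semantics paraphrased from the code rather than from any documentation:

# 'edit'   : add what is missing and update what already exists (the default shown above)
# 'append' : only add missing entries; existing protocols and attributes are left untouched
# 'hard'   : like 'edit', but also delete protocols, attributes and RSEs absent from the input
rses = {}  # hypothetical mapping of RSE name -> settings, shaped as sketched after example #19
import_rses(rses,
            rse_sync_method='edit',
            attr_sync_method='append',
            protocol_sync_method='hard',
            vo='def')
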
Code example #25
def test_singlehop_vs_multihop_priority(rse_factory, root_account, mock_scope,
                                        core_config_mock, caches_mock):
    """
    When the distance difference is small, the single hop is prioritized over the
    multihop path because of HOP_PENALTY. When the difference is large, the multihop
    path is prioritized.
    """
    # +------+    +------+
    # |      | 10 |      |
    # | RSE0 +--->| RSE1 |
    # |      |    |      +-+ 10
    # +------+    +------+ |  +------+       +------+
    #                      +->|      |  200  |      |
    # +------+                | RSE3 |<------| RSE4 |
    # |      |   30      +--->|      |       |      |
    # | RSE2 +-----------+    +------+       +------+
    # |      |
    # +------+
    _, rse0_id = rse_factory.make_posix_rse()
    _, rse1_id = rse_factory.make_posix_rse()
    _, rse2_id = rse_factory.make_posix_rse()
    rse3_name, rse3_id = rse_factory.make_posix_rse()
    _, rse4_id = rse_factory.make_posix_rse()

    add_distance(rse0_id, rse1_id, ranking=10)
    add_distance(rse1_id, rse3_id, ranking=10)
    add_distance(rse2_id, rse3_id, ranking=30)
    add_distance(rse4_id, rse3_id, ranking=200)
    rse_core.add_rse_attribute(rse1_id, 'available_for_multihop', True)

    # add same file to two source RSEs
    file = {
        'scope': mock_scope,
        'name': 'lfn.' + generate_uuid(),
        'type': 'FILE',
        'bytes': 1,
        'adler32': 'beefdead'
    }
    did = {'scope': file['scope'], 'name': file['name']}
    for rse_id in [rse0_id, rse2_id]:
        add_replicas(rse_id=rse_id, files=[file], account=root_account)

    rule_core.add_rule(dids=[did],
                       account=root_account,
                       copies=1,
                       rse_expression=rse3_name,
                       grouping='ALL',
                       weight=None,
                       lifetime=None,
                       locked=False,
                       subscription_id=None)

    # The singlehop must be prioritized
    transfers, _reqs_no_source, _reqs_scheme_mismatch, _reqs_only_tape_source = get_transfer_requests_and_source_replicas(
        rses=rse_factory.created_rses)
    assert len(transfers) == 1
    transfer = next(iter(transfers.values()))
    assert transfer['file_metadata']['src_rse_id'] == rse2_id
    assert transfer['file_metadata']['dest_rse_id'] == rse3_id

    # add same file to two source RSEs
    file = {
        'scope': mock_scope,
        'name': 'lfn.' + generate_uuid(),
        'type': 'FILE',
        'bytes': 1,
        'adler32': 'beefdead'
    }
    did = {'scope': file['scope'], 'name': file['name']}
    for rse_id in [rse0_id, rse4_id]:
        add_replicas(rse_id=rse_id, files=[file], account=root_account)

    rule_core.add_rule(dids=[did],
                       account=root_account,
                       copies=1,
                       rse_expression=rse3_name,
                       grouping='ALL',
                       weight=None,
                       lifetime=None,
                       locked=False,
                       subscription_id=None)

    # The multihop must be prioritized
    transfers, _reqs_no_source, _reqs_scheme_mismatch, _reqs_only_tape_source = get_transfer_requests_and_source_replicas(
        rses=rse_factory.created_rses)
    assert len([
        t for t in transfers.values()
        if t['file_metadata']['name'] == did['name']
    ]) == 2
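
The trade-off exercised by this test boils down to comparing path costs. A back-of-the-envelope sketch of that comparison (the hop penalty value here is an assumption for illustration; the actual value comes from configuration):

hop_penalty = 10  # illustrative value only

def path_cost(distances, hop_penalty):
    # total distance plus a penalty for every additional hop
    return sum(distances) + hop_penalty * (len(distances) - 1)

# First scenario: direct RSE2 -> RSE3 (30) vs RSE0 -> RSE1 -> RSE3 (10 + 10 + penalty).
# The costs are comparable, so the extra transfer of the multihop path is not worth it.
print(path_cost([30], hop_penalty), path_cost([10, 10], hop_penalty))   # 30 30

# Second scenario: direct RSE4 -> RSE3 (200) vs RSE0 -> RSE1 -> RSE3 (10 + 10 + penalty).
# Here the multihop path is clearly cheaper despite the penalty.
print(path_cost([200], hop_penalty), path_cost([10, 10], hop_penalty))  # 200 30
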
Code example #26
    def setUpClass(cls):
        # Add test RSE
        cls.rse1 = 'MOCK'
        cls.rse3 = 'MOCK3'
        cls.rse4 = 'MOCK4'
        cls.rse5 = 'MOCK5'

        cls.rse1_id = get_rse(cls.rse1).id
        cls.rse3_id = get_rse(cls.rse3).id
        cls.rse4_id = get_rse(cls.rse4).id
        cls.rse5_id = get_rse(cls.rse5).id

        # Add Tags
        cls.T1 = tag_generator()
        cls.T2 = tag_generator()
        add_rse_attribute(cls.rse1, cls.T1, True)
        add_rse_attribute(cls.rse3, cls.T1, True)
        add_rse_attribute(cls.rse4, cls.T2, True)
        add_rse_attribute(cls.rse5, cls.T1, True)

        # Add fake weights
        add_rse_attribute(cls.rse1, "fakeweight", 10)
        add_rse_attribute(cls.rse3, "fakeweight", 0)
        add_rse_attribute(cls.rse4, "fakeweight", 0)
        add_rse_attribute(cls.rse5, "fakeweight", 0)
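
A hedged sketch of how these fixtures are typically consumed: the generated tag resolves through the expression parser to the RSEs it was attached to, and an attribute such as "fakeweight" is the kind of key passed as the weight parameter of add_rule (see example #25):

    def test_tag_resolution(self):
        # Hypothetical check, not part of the original example: T1 was attached
        # to rse1, rse3 and rse5 above, T2 only to rse4.
        from rucio.core import rse_expression_parser
        t1_ids = sorted(r['id'] for r in rse_expression_parser.parse_expression(self.T1))
        assert t1_ids == sorted([self.rse1_id, self.rse3_id, self.rse5_id])
        # A rule created with rse_expression=self.T1 and weight="fakeweight" would
        # then favour rse1, whose fakeweight of 10 outweighs the 0 on the others.
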
Code example #27
    def setup(self):
        # New RSE
        self.new_rse = rse_name_generator()

        # RSE 1 that already exists
        self.old_rse_1 = rse_name_generator()
        add_rse(self.old_rse_1, availability=1, region_code='DE', country_name='DE', deterministic=True, volatile=True, staging_area=True, time_zone='Europe', latitude='1', longitude='2')
        add_protocol(self.old_rse_1, {'scheme': 'scheme1', 'hostname': 'hostname1', 'port': 1000, 'impl': 'TODO'})
        add_protocol(self.old_rse_1, {'scheme': 'scheme3', 'hostname': 'hostname3', 'port': 1000, 'impl': 'TODO'})
        self.old_rse_id_1 = get_rse_id(self.old_rse_1)
        set_rse_limits(rse=self.old_rse_1, name='MaxBeingDeletedFiles', value='10')
        set_rse_limits(rse=self.old_rse_1, name='MinFreeSpace', value='10')
        add_rse_attribute(rse=self.old_rse_1, key='attr1', value='test10')
        add_rse_attribute(rse=self.old_rse_1, key='lfn2pfn_algorithm', value='test10')
        add_rse_attribute(rse=self.old_rse_1, key='verify_checksum', value=True)

        # RSE 2 that already exists
        self.old_rse_2 = rse_name_generator()
        add_rse(self.old_rse_2)
        self.old_rse_id_2 = get_rse_id(self.old_rse_2)

        # RSE 3 that already exists
        self.old_rse_3 = rse_name_generator()
        add_rse(self.old_rse_3)
        self.old_rse_id_3 = get_rse_id(self.old_rse_3)

        # Distance that already exists
        add_distance(self.old_rse_id_1, self.old_rse_id_2)

        self.data1 = {
            'rses': {
                self.new_rse: {
                    'rse_type': RSEType.TAPE,
                    'availability': 3,
                    'city': 'NewCity',
                    'region_code': 'CH',
                    'country_name': 'switzerland',
                    'staging_area': False,
                    'time_zone': 'Europe',
                    'latitude': 1,
                    'longitude': 2,
                    'deterministic': True,
                    'volatile': False,
                    'protocols': [{
                        'scheme': 'scheme',
                        'hostname': 'hostname',
                        'port': 1000,
                        'impl': 'impl'
                    }],
                    'attributes': {
                        'attr1': 'test'
                    },
                    'MinFreeSpace': 20000,
                    'lfn2pfn_algorithm': 'hash2',
                    'verify_checksum': False,
                    'availability_delete': True,
                    'availability_read': False,
                    'availability_write': True
                },
                self.old_rse_1: {
                    'rse_type': RSEType.TAPE,
                    'deterministic': False,
                    'volatile': False,
                    'region_code': 'US',
                    'country_name': 'US',
                    'staging_area': False,
                    'time_zone': 'Asia',
                    'longitude': 5,
                    'city': 'City',
                    'availability': 2,
                    'latitude': 10,
                    'protocols': [{
                        'scheme': 'scheme1',
                        'hostname': 'hostname1',
                        'port': 1000,
                        'prefix': 'prefix',
                        'impl': 'impl1'
                    }, {
                        'scheme': 'scheme2',
                        'hostname': 'hostname2',
                        'port': 1001,
                        'impl': 'impl'
                    }],
                    'attributes': {
                        'attr1': 'test1',
                        'attr2': 'test2'
                    },
                    'MinFreeSpace': 10000,
                    'MaxBeingDeletedFiles': 1000,
                    'verify_checksum': False,
                    'lfn2pfn_algorithm': 'hash3',
                    'availability_delete': False,
                    'availability_read': False,
                    'availability_write': True
                }
            },
            'distances': {
                self.old_rse_1: {
                    self.old_rse_2: {'src_rse_id': self.old_rse_id_1, 'dest_rse_id': self.old_rse_id_2, 'ranking': 10},
                    self.old_rse_3: {'src_rse_id': self.old_rse_id_1, 'dest_rse_id': self.old_rse_id_3, 'ranking': 4}
                }
            }
        }

        self.data2 = {'rses': {self.new_rse: {'rse': self.new_rse}}}
        self.data3 = {'distances': {}}
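
These fixtures match the input shape of the newer importer in example #24: 'rses' is keyed by RSE name and 'distances' is nested by source and destination. A hedged sketch of a test exercising it with the data prepared above (the separate handling of the 'distances' block is an assumption; it is not part of import_rses):

    def test_import_rses_with_fixtures(self):
        # Hypothetical use of the fixtures above with the importer from example #24.
        import_rses(self.data1['rses'],
                    rse_sync_method='edit',
                    attr_sync_method='edit',
                    protocol_sync_method='edit')
        # self.data1['distances'] would be consumed by a separate distance-import step.
        assert get_rse_id(self.new_rse) is not None
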