Example #1
def add_sync_rule(rse, scope, block, account, session=None):
    rse_id = get_rse_id(rse=rse, session=session)
    scope = InternalScope(scope)
    account = InternalAccount(account)

    files = []
    for file in list_files(scope, block, long=False, session=session):
        try:
            update_replicas_states(
                replicas=[
                    {
                        "scope": scope,
                        "name": file["name"],
                        "rse_id": rse_id,
                        "state": "A",
                    }
                ],
                add_tombstone=False,
                session=session,
            )
        except ReplicaNotFound:
            add_replicas(
                rse_id=rse_id,
                files=[
                    {
                        "scope": scope,
                        "name": file["name"],
                        "bytes": file["bytes"],
                        "adler32": file["adler32"],
                    }
                ],
                account=account,
                ignore_availability=True,
                session=session,
            )

    rules = add_rule(
        dids=[{"scope": scope, "name": block}],
        copies=1,
        rse_expression=rse,
        weight=None,
        lifetime=None,
        grouping="DATASET",
        account=account,
        locked=False,
        subscription_id=None,
        source_replica_expression=None,
        activity="Data Consolidation",
        notify=None,
        purge_replicas=False,
        ignore_availability=True,
        comment=None,
        ask_approval=False,
        asynchronous=False,
        priority=3,
        split_container=False,
        meta=json.dumps({"phedex_group": "DataOps", "phedex_custodial": True}),
        session=session,
    )
    return rules[0]
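A minimal sketch of how the helper above might be invoked, assuming it is imported from wherever it is defined; the RSE, scope, block and account names below are placeholders, not values taken from the example:

# Hypothetical call to add_sync_rule(); all argument values are placeholders.
# The helper accepts plain strings and converts them to internal types itself,
# then pins the whole block to the given RSE with a single rule.
rule_id = add_sync_rule(
    rse='T2_US_EXAMPLE',                       # RSE expression for the rule
    scope='cms',                               # external scope name
    block='/Primary/Processed/TIER#block-id',  # dataset ("block") name
    account='sync_account',                    # account owning the replicas and rule
)
print(rule_id)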
Example #2
def add_replicas(rse, files, issuer, ignore_availability=False, vo='def'):
    """
    Bulk add file replicas.

    :param rse: The RSE name.
    :param files: The list of files.
    :param issuer: The issuer account.
    :param ignore_availability: Ignore blocked RSEs.
    :param vo: The VO to act on.

    :returns: True if successful, False otherwise
    """
    for v_file in files:
        v_file.update({"type": "FILE"})  # Make sure DIDs are identified as files for checking
    validate_schema(name='dids', obj=files, vo=vo)

    rse_id = get_rse_id(rse=rse, vo=vo)

    kwargs = {'rse': rse, 'rse_id': rse_id}
    if not permission.has_permission(issuer=issuer, vo=vo, action='add_replicas', kwargs=kwargs):
        raise exception.AccessDenied('Account %s can not add file replicas on %s' % (issuer, rse))
    if not permission.has_permission(issuer=issuer, vo=vo, action='skip_availability_check', kwargs=kwargs):
        ignore_availability = False

    issuer = InternalAccount(issuer, vo=vo)
    for f in files:
        f['scope'] = InternalScope(f['scope'], vo=vo)
        if 'account' in f:
            f['account'] = InternalAccount(f['account'], vo=vo)

    replica.add_replicas(rse_id=rse_id, files=files, account=issuer, ignore_availability=ignore_availability)
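A minimal usage sketch for this gateway-layer add_replicas(); the RSE name, scope and issuer are placeholders and a single default VO ('def') is assumed:

# Hypothetical call; the file dictionaries must satisfy the 'dids' schema
# (scope, name, bytes, adler32) and the issuer needs the add_replicas permission.
files = [{'scope': 'mock',
          'name': 'file_deadbeef',
          'bytes': 1,
          'adler32': '0cc737eb'}]
add_replicas(rse='MOCK', files=files, issuer='root', ignore_availability=True, vo='def')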
Example #3
 def test_update_replicas_paths(self):
     """ REPLICA (CORE): Force update the replica path """
     tmp_scope = 'mock'
     nbfiles = 5
     rse_info = rsemgr.get_rse_info('MOCK')
     files = [{
         'scope': tmp_scope,
         'name': 'file_%s' % generate_uuid(),
         'pfn':
         'srm://mock2.com:8443/srm/managerv2?SFN=/rucio/tmpdisk/rucio_tests/does/not/really/matter/where',
         'bytes': 1,
         'adler32': '0cc737eb',
         'meta': {
             'events': 10
         },
         'rse_id': rse_info['id'],
         'path': '/does/not/really/matter/where'
     } for _ in range(nbfiles)]
     add_replicas(rse='MOCK2',
                  files=files,
                  account='root',
                  ignore_availability=True)
     update_replicas_paths(files)
     for replica in list_replicas(dids=[{
             'scope': f['scope'],
             'name': f['name'],
             'type': DIDType.FILE
     } for f in files],
                                  schemes=['srm']):
         # force the changed string - if we look it up from the DB, then we're not testing anything :-D
         assert_equal(
             replica['rses']['MOCK2'][0],
             'srm://mock2.com:8443/srm/managerv2?SFN=/rucio/tmpdisk/rucio_tests/does/not/really/matter/where'
         )
Example #4
    def test_add_list_replicas(self):
        """ REPLICA (CORE): Add and list file replicas """
        tmp_scope = 'mock'
        nbfiles = 13
        files = [{
            'scope': tmp_scope,
            'name': 'file_%s' % generate_uuid(),
            'bytes': 1,
            'adler32': '0cc737eb',
            'meta': {
                'events': 10
            }
        } for _ in range(nbfiles)]
        rses = ['MOCK', 'MOCK3']
        for rse in rses:
            add_replicas(rse=rse,
                         files=files,
                         account='root',
                         ignore_availability=True)

        replica_cpt = 0
        for _ in list_replicas(dids=[{
                'scope': f['scope'],
                'name': f['name'],
                'type': DIDType.FILE
        } for f in files],
                               schemes=['srm']):
            replica_cpt += 1

        assert_equal(nbfiles, replica_cpt)
Example #5
 def test_get_did_from_pfns_deterministic(self):
     """ REPLICA (CLIENT): Get list of DIDs associated to PFNs for deterministic sites"""
     tmp_scope = 'mock'
     rse = 'MOCK3'
     nbfiles = 3
     pfns = []
     input = {}
     rse_info = rsemgr.get_rse_info(rse)
     assert_equal(rse_info['deterministic'], True)
     files = [{
         'scope': tmp_scope,
         'name': 'file_%s' % generate_uuid(),
         'bytes': 1,
         'adler32': '0cc737eb',
         'meta': {
             'events': 10
         }
     } for _ in range(nbfiles)]
     p = rsemgr.create_protocol(rse_info, 'read', scheme='srm')
     for f in files:
         pfn = list(p.lfns2pfns(lfns={
             'scope': f['scope'],
             'name': f['name']
         }).values())[0]
         pfns.append(pfn)
         input[pfn] = {'scope': f['scope'], 'name': f['name']}
     add_replicas(rse=rse,
                  files=files,
                  account='root',
                  ignore_availability=True)
     for result in self.replica_client.get_did_from_pfns(pfns, rse):
         pfn = list(result.keys())[0]
         assert_equal(input[pfn], list(result.values())[0])
Example #6
def add_replicas(rse, files, issuer, ignore_availability=False):
    """
    Bulk add file replicas.

    :param rse: The RSE name.
    :param files: The list of files.
    :param issuer: The issuer account.
    :param ignore_availability: Ignore the RSE blacklisting.

    :returns: True if successful, False otherwise
    """
    validate_schema(name='dids', obj=files)

    rse_id = get_rse_id(rse=rse)

    kwargs = {'rse': rse, 'rse_id': rse_id}
    if not permission.has_permission(
            issuer=issuer, action='add_replicas', kwargs=kwargs):
        raise exception.AccessDenied(
            'Account %s can not add file replicas on %s' % (issuer, rse))
    if not permission.has_permission(
            issuer=issuer, action='skip_availability_check', kwargs=kwargs):
        ignore_availability = False

    replica.add_replicas(rse_id=rse_id,
                         files=files,
                         account=issuer,
                         ignore_availability=ignore_availability)
Example #7
def test_two_sources_one_destination(db_session, vo, file, source_rse,
                                     source2_rse, mock_request):
    add_replicas(rse_id=source2_rse['id'],
                 files=[file],
                 account=mock_request.account,
                 session=db_session)
    try:
        src1_distance, src2_distance = (get_distances(
            src_rse_id=src_rse,
            dest_rse_id=mock_request.dest_rse_id,
            session=db_session) for src_rse in (source_rse['id'],
                                                source2_rse['id']))

        assert src1_distance and len(
            src1_distance) == 1 and src1_distance[0]['ranking'] == 5
        assert src2_distance and len(
            src2_distance) == 1 and src2_distance[0]['ranking'] == 2

        preparer.run_once(session=db_session)
        db_session.commit()

        updated_mock_request = db_session.query(models.Request).filter_by(
            id=mock_request.id).one()  # type: models.Request

        assert updated_mock_request.state == RequestState.QUEUED
        assert updated_mock_request.source_rse_id == source2_rse[
            'id']  # distance 2 < 5

    finally:
        delete_replicas(rse_id=source2_rse['id'],
                        files=[file],
                        session=db_session)
        db_session.commit()
Example #8
    def test_touch_replicas(self):
        """ REPLICA (CORE): Touch replicas accessed_at timestamp"""
        tmp_scope = 'mock'
        nbfiles = 5
        files1 = [{'scope': tmp_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for i in range(nbfiles)]
        files2 = [{'scope': tmp_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for i in range(nbfiles)]
        files2.append(files1[0])
        add_replicas(rse='MOCK', files=files1, account='root', ignore_availability=True)
        add_replicas(rse='MOCK', files=files2, account='root', ignore_availability=True)

        now = datetime.utcnow()

        now -= timedelta(microseconds=now.microsecond)

        assert_equal(None, get_replica_atime({'scope': files1[0]['scope'], 'name': files1[0]['name'], 'rse': 'MOCK'}))
        assert_equal(None, get_did_atime(scope=tmp_scope, name=files1[0]['name']))

        for r in [{'scope': files1[0]['scope'], 'name': files1[0]['name'], 'rse': 'MOCK', 'accessed_at': now}]:
            touch_replica(r)

        assert_equal(now, get_replica_atime({'scope': files1[0]['scope'], 'name': files1[0]['name'], 'rse': 'MOCK'}))
        assert_equal(now, get_did_atime(scope=tmp_scope, name=files1[0]['name']))

        for i in range(1, nbfiles):
            assert_equal(None, get_replica_atime({'scope': files1[i]['scope'], 'name': files1[i]['name'], 'rse': 'MOCK'}))

        for i in range(0, nbfiles - 1):
            assert_equal(None, get_replica_atime({'scope': files2[i]['scope'], 'name': files2[i]['name'], 'rse': 'MOCK'}))
Example #9
    def test_list_archive_contents_transparently(self):
        """ ARCHIVE (CORE): Transparent archive listing """

        scope = InternalScope('mock', **self.vo)
        rse = 'APERTURE_%s' % rse_name_generator()
        rse_id = add_rse(rse, **self.vo)
        root = InternalAccount('root', **self.vo)

        add_protocol(rse_id, {'scheme': 'root',
                              'hostname': 'root.aperture.com',
                              'port': 1409,
                              'prefix': '//test/chamber/',
                              'impl': 'rucio.rse.protocols.xrootd.Default',
                              'domains': {
                                  'lan': {'read': 1, 'write': 1, 'delete': 1},
                                  'wan': {'read': 1, 'write': 1, 'delete': 1}}})

        # register archive
        archive = {'scope': scope, 'name': 'weighted.storage.cube.zip', 'type': 'FILE',
                   'bytes': 2596, 'adler32': 'beefdead'}
        archive_client = archive.copy()
        archive_client['scope'] = archive_client['scope'].external

        add_replicas(rse_id=rse_id, files=[archive], account=root)

        # archived files with replicas
        files_with_replicas = [{'scope': scope, 'name': 'witrep-%i-%s' % (i, str(generate_uuid())), 'type': 'FILE',
                                'bytes': 1234, 'adler32': 'deadbeef'} for i in range(2)]
        files_with_replicas_client = []
        for f in files_with_replicas:
            new_file = f.copy()
            new_file['scope'] = new_file['scope'].external
            files_with_replicas_client.append(new_file)

        add_replicas(rse_id=rse_id, files=files_with_replicas, account=root)
        self.dc.add_files_to_archive(scope=archive_client['scope'], name=archive_client['name'], files=files_with_replicas_client)

        res = [r['pfns'] for r in self.rc.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']} for f in files_with_replicas_client],
                                                        resolve_archives=True)]
        assert_equal(len(res), 2)
        assert_equal(len(res[0]), 2)
        assert_equal(len(res[1]), 2)
        for r in res:
            for p in r:
                if r[p]['domain'] == 'zip':
                    assert_in('weighted.storage.cube.zip?xrdcl.unzip=witrep-', p)
                else:
                    assert_not_in('weighted.storage.cube.zip?xrdcl.unzip=witrep-', p)

        # archived files without replicas
        files = [{'scope': scope.external, 'name': 'norep-%i-%s' % (i, str(generate_uuid())), 'type': 'FILE',
                  'bytes': 1234, 'adler32': 'deadbeef'} for i in range(2)]
        self.dc.add_files_to_archive(scope=archive_client['scope'], name=archive_client['name'], files=files)
        res = [r['pfns'] for r in self.rc.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']} for f in files],
                                                        resolve_archives=True)]
        assert_equal(len(res), 2)
        for r in res:
            assert_in('weighted.storage.cube.zip?xrdcl.unzip=norep-', list(r.keys())[0])
Example #10
def test_archive_on_dataset_level(rse_factory, did_factory, root_account):
    rse_name, rse_id = rse_factory.make_xroot_rse()

    dataset1 = did_factory.make_dataset()
    dataset2 = did_factory.make_dataset()
    container = did_factory.make_container()
    attach_dids(dids=[dataset1, dataset2], account=root_account, **container)

    # Add a random file to the datasets to avoid dataset deletion when the archive is deleted
    a_file = did_factory.random_did()
    add_replicas(rse_id=rse_id,
                 files=[{
                     **a_file, 'bytes': 500,
                     'type': 'FILE',
                     'adler32': 'beefbeef'
                 }],
                 account=root_account)
    attach_dids(dids=[a_file], account=root_account, **dataset1)
    attach_dids(dids=[a_file], account=root_account, **dataset2)
    # adding a non-archive file should not set is_archive=True
    metadata = get_metadata(**dataset1)
    assert not metadata['is_archive']

    # Create an archive and its constituents, attach the archive to datasets
    archive = did_factory.random_did(name_prefix='archive', name_suffix='.zip')
    add_replicas(rse_id=rse_id,
                 files=[{
                     **archive, 'bytes': 500,
                     'type': 'FILE',
                     'adler32': 'beefbeef'
                 }],
                 account=root_account)
    constituents = [did_factory.random_did() for _ in range(2)]
    # Add archive to one dataset _before_ attaching files to the archive (before is_archive is set on the archive did)
    attach_dids(dids=[archive], account=root_account, **dataset1)
    attach_dids(dids=[{
        **c, 'bytes': 200,
        'adler32': 'ababbaba'
    } for c in constituents],
                account=root_account,
                **archive)
    # Attach to another dataset _after_ attaching files to the archive
    attach_dids(dids=[archive], account=root_account, **dataset2)

    # Both datasets must have is_archive = True
    metadata = get_metadata(**dataset1)
    assert metadata['is_archive'] is True
    metadata = get_metadata(**dataset2)
    assert metadata['is_archive'] is True

    # Delete the archive, the datasets must now have is_archive == false
    delete_replicas(rse_id=rse_id, files=[archive])

    metadata = get_metadata(**dataset1)
    assert not metadata['is_archive']
    metadata = get_metadata(**dataset2)
    assert not metadata['is_archive']
Example #11
 def setup(rse):
     add_distance(rse.rse_id,
                  dest_rse['id'],
                  ranking=2,
                  session=rse.db_session)
     add_replicas(rse_id=rse.rse_id,
                  files=[file],
                  account=mock_request.account,
                  session=rse.db_session)
Example #12
    def test_add_list_bad_replicas(self):
        """ REPLICA (CORE): Add bad replicas and list them"""
        tmp_scope = 'mock'
        nbfiles = 5
        # Adding replicas to deterministic RSE
        files = [{'scope': tmp_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for i in range(nbfiles)]
        rse_info = rsemgr.get_rse_info('MOCK')
        rse_id1 = rse_info['id']
        add_replicas(rse='MOCK', files=files, account='root', ignore_availability=True)

        # Listing replicas on deterministic RSE
        replicas = []
        list_rep = []
        for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files], schemes=['srm']):
            replicas.extend(replica['rses']['MOCK'])
            list_rep.append(replica)
        r = declare_bad_file_replicas(replicas, 'This is a good reason', 'root')
        assert_equal(r, {})
        bad_replicas = list_bad_replicas()
        nbbadrep = 0
        for rep in list_rep:
            for badrep in bad_replicas:
                if badrep['rse_id'] == rse_id1:
                    if badrep['scope'] == rep['scope'] and badrep['name'] == rep['name']:
                        nbbadrep += 1
        assert_equal(len(replicas), nbbadrep)

        # Adding replicas to non-deterministic RSE
        files = [{'scope': tmp_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb',
                  'pfn': 'srm://mock2.com:8443/srm/managerv2?SFN=/rucio/tmpdisk/rucio_tests/%s/%s' % (tmp_scope, generate_uuid()), 'meta': {'events': 10}} for i in range(nbfiles)]
        rse_info = rsemgr.get_rse_info('MOCK2')
        rse_id2 = rse_info['id']
        add_replicas(rse='MOCK2', files=files, account='root', ignore_availability=True)

        # Listing replicas on non-deterministic RSE
        replicas = []
        list_rep = []
        for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files], schemes=['srm']):
            replicas.extend(replica['rses']['MOCK2'])
            list_rep.append(replica)
        r = declare_bad_file_replicas(replicas, 'This is a good reason', 'root')
        assert_equal(r, {})
        bad_replicas = list_bad_replicas()
        nbbadrep = 0
        for rep in list_rep:
            for badrep in bad_replicas:
                if badrep['rse_id'] == rse_id2:
                    if badrep['scope'] == rep['scope'] and badrep['name'] == rep['name']:
                        nbbadrep += 1
        assert_equal(len(replicas), nbbadrep)

        # Now adding non-existing bad replicas
        files = ['srm://mock2.com/rucio/tmpdisk/rucio_tests/%s/%s' % (tmp_scope, generate_uuid()), ]
        r = declare_bad_file_replicas(files, 'This is a good reason', 'root')
        output = ['%s Unknown replica' % rep for rep in files]
        assert_equal(r, {'MOCK2': output})
Example #13
def test_singlehop_vs_multihop_priority(rse_factory, root_account, mock_scope, core_config_mock, caches_mock):
    """
    On small distance difference, singlehop is prioritized over multihop
    due to HOP_PENALTY. On big difference, multihop is prioritized
    """
    # +------+    +------+
    # |      | 10 |      |
    # | RSE0 +--->| RSE1 |
    # |      |    |      +-+ 10
    # +------+    +------+ |  +------+       +------+
    #                      +->|      |  200  |      |
    # +------+                | RSE3 |<------| RSE4 |
    # |      |   30      +--->|      |       |      |
    # | RSE2 +-----------+    +------+       +------+
    # |      |
    # +------+
    _, rse0_id = rse_factory.make_posix_rse()
    _, rse1_id = rse_factory.make_posix_rse()
    _, rse2_id = rse_factory.make_posix_rse()
    rse3_name, rse3_id = rse_factory.make_posix_rse()
    _, rse4_id = rse_factory.make_posix_rse()

    add_distance(rse0_id, rse1_id, ranking=10)
    add_distance(rse1_id, rse3_id, ranking=10)
    add_distance(rse2_id, rse3_id, ranking=30)
    add_distance(rse4_id, rse3_id, ranking=200)
    rse_core.add_rse_attribute(rse1_id, 'available_for_multihop', True)

    # add same file to two source RSEs
    file = {'scope': mock_scope, 'name': 'lfn.' + generate_uuid(), 'type': 'FILE', 'bytes': 1, 'adler32': 'beefdead'}
    did = {'scope': file['scope'], 'name': file['name']}
    for rse_id in [rse0_id, rse2_id]:
        add_replicas(rse_id=rse_id, files=[file], account=root_account)

    rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=rse3_name, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)

    # The singlehop must be prioritized
    [[_, [transfer]]] = next_transfers_to_submit(rses=rse_factory.created_rses).items()
    assert len(transfer) == 1
    assert transfer[0].src.rse.id == rse2_id
    assert transfer[0].dst.rse.id == rse3_id

    # add same file to two source RSEs
    file = {'scope': mock_scope, 'name': 'lfn.' + generate_uuid(), 'type': 'FILE', 'bytes': 1, 'adler32': 'beefdead'}
    did = {'scope': file['scope'], 'name': file['name']}
    for rse_id in [rse0_id, rse4_id]:
        add_replicas(rse_id=rse_id, files=[file], account=root_account)

    rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=rse3_name, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)

    # The multihop must be prioritized
    [[_, transfers]] = next_transfers_to_submit(rses=rse_factory.created_rses).items()
    transfer = next(iter(t for t in transfers if t[0].rws.name == file['name']))
    assert len(transfer) == 2
Example #14
def test_add_list_bad_replicas(rse_factory, mock_scope, root_account):
    """ REPLICA (CORE): Add bad replicas and list them"""

    nbfiles = 5
    # Adding replicas to deterministic RSE
    _, rse1_id = rse_factory.make_srm_rse(deterministic=True)
    files = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles)]
    add_replicas(rse_id=rse1_id, files=files, account=root_account, ignore_availability=True)

    # Listing replicas on deterministic RSE
    replicas = []
    list_rep = []
    for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files], schemes=['srm']):
        replicas.extend(replica['rses'][rse1_id])
        list_rep.append(replica)
    r = declare_bad_file_replicas(replicas, 'This is a good reason', root_account)
    assert r == {}
    bad_replicas = list_bad_replicas()
    nbbadrep = 0
    for rep in list_rep:
        for badrep in bad_replicas:
            if badrep['rse_id'] == rse1_id:
                if badrep['scope'] == rep['scope'] and badrep['name'] == rep['name']:
                    nbbadrep += 1
    assert len(replicas) == nbbadrep

    # Adding replicas to non-deterministic RSE
    _, rse2_id = rse_factory.make_srm_rse(deterministic=False)
    files = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb',
              'pfn': 'srm://%s.cern.ch/srm/managerv2?SFN=/test/%s/%s' % (rse2_id, mock_scope, generate_uuid()), 'meta': {'events': 10}} for _ in range(nbfiles)]
    add_replicas(rse_id=rse2_id, files=files, account=root_account, ignore_availability=True)

    # Listing replicas on non-deterministic RSE
    replicas = []
    list_rep = []
    for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files], schemes=['srm']):
        replicas.extend(replica['rses'][rse2_id])
        list_rep.append(replica)
    r = declare_bad_file_replicas(replicas, 'This is a good reason', root_account)
    assert r == {}
    bad_replicas = list_bad_replicas()
    nbbadrep = 0
    for rep in list_rep:
        for badrep in bad_replicas:
            if badrep['rse_id'] == rse2_id:
                if badrep['scope'] == rep['scope'] and badrep['name'] == rep['name']:
                    nbbadrep += 1
    assert len(replicas) == nbbadrep

    # Now adding non-existing bad replicas
    files = ['srm://%s.cern.ch/test/%s/%s' % (rse2_id, mock_scope, generate_uuid()), ]
    r = declare_bad_file_replicas(files, 'This is a good reason', root_account)
    output = ['%s Unknown replica' % rep for rep in files]
    assert r == {rse2_id: output}
Example #15
    def test_list_archive_contents_at_rse(self):
        """ ARCHIVE (CORE): Transparent archive listing at RSE """

        scope = 'mock'

        rse1 = 'APERTURE_%s' % rse_name_generator()
        add_rse(rse1)
        add_protocol(rse1, {'scheme': 'root',
                            'hostname': 'root.aperture.com',
                            'port': 1409,
                            'prefix': '//test/chamber/',
                            'impl': 'rucio.rse.protocols.xrootd.Default',
                            'domains': {
                                'lan': {'read': 1, 'write': 1, 'delete': 1},
                                'wan': {'read': 1, 'write': 1, 'delete': 1}}})

        rse2 = 'BLACKMESA_%s' % rse_name_generator()
        add_rse(rse2)
        add_protocol(rse2, {'scheme': 'root',
                            'hostname': 'root.blackmesa.com',
                            'port': 1409,
                            'prefix': '//lambda/complex/',
                            'impl': 'rucio.rse.protocols.xrootd.Default',
                            'domains': {
                                'lan': {'read': 1, 'write': 1, 'delete': 1},
                                'wan': {'read': 1, 'write': 1, 'delete': 1}}})

        # register archive
        archive1 = {'scope': scope, 'name': 'cube.1.zip', 'type': 'FILE', 'bytes': 2596, 'adler32': 'beefdead'}
        archive2 = {'scope': scope, 'name': 'cube.2.zip', 'type': 'FILE', 'bytes': 5432, 'adler32': 'deadbeef'}
        add_replicas(rse=rse1, files=[archive1], account='root')
        add_replicas(rse=rse2, files=[archive2], account='root')

        # archived files with replicas
        archived_file = [{'scope': scope, 'name': 'zippedfile-%i-%s' % (i, str(generate_uuid())), 'type': 'FILE',
                          'bytes': 4322, 'adler32': 'beefbeef'} for i in range(2)]
        self.dc.add_files_to_archive(scope=scope, name=archive1['name'], files=archived_file)
        self.dc.add_files_to_archive(scope=scope, name=archive2['name'], files=archived_file)

        res = [r['pfns'] for r in self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in archived_file],
                                                        rse_expression=rse1,
                                                        resolve_archives=True)]

        res = self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in archived_file], metalink=True, rse_expression=rse1, resolve_archives=True)
        assert_in('APERTURE', res)
        assert_not_in('BLACKMESA', res)

        res = self.rc.list_replicas(dids=[{'scope': scope, 'name': f['name']} for f in archived_file], metalink=True, rse_expression=rse2, resolve_archives=True)
        assert_in('BLACKMESA', res)
        assert_not_in('APERTURE', res)

        del_rse(rse1)
        del_rse(rse2)
Example #16
def test_get_bad_replicas_backlog(rse_factory, mock_scope, root_account, file_config_mock):
    """ REPLICA (CORE): Check the behaviour of the necromancer in case of backlog on an RSE"""

    # Run necromancer once
    necromancer_run(threads=1, bulk=10000, once=True)

    nbfiles1 = 100
    nbfiles2 = 20
    # Adding replicas to deterministic RSE
    rse1, rse1_id = rse_factory.make_srm_rse(deterministic=True)
    _, rse2_id = rse_factory.make_srm_rse(deterministic=True)

    # Create bad replicas on rse1
    files = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles1)]
    add_replicas(rse_id=rse1_id, files=files, account=root_account, ignore_availability=True)

    replicas = []
    list_rep = []
    for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files], schemes=['srm']):
        replicas.extend(replica['rses'][rse1_id])
        list_rep.append({'scope': replica['scope'], 'name': replica['name'], 'rse': rse1, 'rse_id': rse1_id})
    res = declare_bad_file_replicas(replicas, 'This is a good reason', root_account)
    assert res == {}

    result = get_bad_replicas_backlog(force_refresh=True)
    assert rse1_id in result
    assert result[rse1_id] == nbfiles1

    # Create more bad replicas on rse2
    files = [{'scope': mock_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for _ in range(nbfiles2)]
    add_replicas(rse_id=rse2_id, files=files, account=root_account, ignore_availability=True)

    repl = []
    for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files], schemes=['srm']):
        repl.extend(replica['rses'][rse2_id])
    res = declare_bad_file_replicas(repl, 'This is a good reason', root_account)
    assert res == {}

    # List bad replicas on rse1
    bad_replicas = list_bad_replicas(rses=[{'id': rse1_id}])
    assert len(bad_replicas) == nbfiles1
    for rep in bad_replicas:
        assert rep in list_rep

    # Run necromancer once, all the files on RSE2 should be gone, 80 files should stay on RSE1
    get_bad_replicas_backlog(force_refresh=True)
    necromancer_run(threads=1, bulk=20, once=True)

    bad_replicas = list_bad_replicas(rses=[{'id': rse1_id}, {'id': rse2_id}])
    assert len(bad_replicas) == 80
    for rep in bad_replicas:
        assert rep['rse_id'] == rse1_id
Example #17
    def setup(self):
        if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
            self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
        else:
            self.vo = {}

        self.rc = client.ReplicaClient()
        self.rse1 = rse_name_generator()
        self.rse2 = rse_name_generator()
        self.rse1_id = add_rse(self.rse1, **self.vo)
        self.rse2_id = add_rse(self.rse2, **self.vo)

        add_protocol(self.rse1_id, {'scheme': 'https',
                                    'hostname': 'storage.googleapis.com',
                                    'port': 443,
                                    'prefix': '/atlas-europe-west1/',
                                    'impl': 'rucio.rse.protocols.gfal.Default',
                                    'domains': {
                                        'lan': {'read': 1, 'write': 1, 'delete': 1},
                                        'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})

        add_protocol(self.rse2_id, {'scheme': 'https',
                                    'hostname': 'storage.googleapis.com',
                                    'port': 443,
                                    'prefix': '/atlas-europe-east1/',
                                    'impl': 'rucio.rse.protocols.gfal.Default',
                                    'domains': {
                                        'lan': {'read': 1, 'write': 1, 'delete': 1},
                                        'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})

        # register some files there
        self.files = [{'scope': InternalScope('mock', **self.vo),
                       'name': 'file-on-gcs_%s' % i,
                       'bytes': 1234,
                       'adler32': 'deadbeef',
                       'meta': {'events': 666}} for i in range(0, 3)]
        root = InternalAccount('root', **self.vo)
        add_replicas(rse_id=self.rse1_id,
                     files=self.files,
                     account=root,
                     ignore_availability=True)
        add_replicas(rse_id=self.rse2_id,
                     files=self.files,
                     account=root,
                     ignore_availability=True)


    def tearDown(self):
        delete_replicas(rse_id=self.rse1_id, files=self.files)
        delete_replicas(rse_id=self.rse2_id, files=self.files)
        del_rse(rse_id=self.rse1_id)
        del_rse(rse_id=self.rse2_id)
Example #18
def mock_request(db_session, vo, source_rse, dest_rse, file):
    account = InternalAccount('root', vo=vo)

    def teardown(req):
        delete_replicas(rse_id=source_rse['id'], files=[file], session=req.db_session)

    add_replicas(rse_id=source_rse['id'], files=[file], account=account, session=db_session)
    with GeneratedRequest(
        scope=file['scope'],
        name=file['name'],
        dest_rse_id=dest_rse['id'],
        account=account,
        db_session=db_session,
        teardown_func=teardown,
    ) as rucio_request:
        yield rucio_request.db_object
Example #19
    def setup(self):

        self.rc = client.ReplicaClient()
        self.rse1 = rse_name_generator()
        self.rse2 = rse_name_generator()
        add_rse(self.rse1)
        add_rse(self.rse2)

        add_protocol(self.rse1, {'scheme': 'https',
                                 'hostname': 'storage.googleapis.com',
                                 'port': 443,
                                 'prefix': '/atlas-europe-west1/',
                                 'impl': 'rucio.rse.protocols.gfal.Default',
                                 'domains': {
                                     'lan': {'read': 1, 'write': 1, 'delete': 1},
                                     'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})

        add_protocol(self.rse2, {'scheme': 'https',
                                 'hostname': 'storage.googleapis.com',
                                 'port': 443,
                                 'prefix': '/atlas-europe-east1/',
                                 'impl': 'rucio.rse.protocols.gfal.Default',
                                 'domains': {
                                     'lan': {'read': 1, 'write': 1, 'delete': 1},
                                     'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})

        # register some files there
        self.files = [{'scope': 'mock',
                       'name': 'file-on-gcs_%s' % i,
                       'bytes': 1234,
                       'adler32': 'deadbeef',
                       'meta': {'events': 666}} for i in range(0, 3)]
        add_replicas(rse=self.rse1,
                     files=self.files,
                     account='root',
                     ignore_availability=True)
        add_replicas(rse=self.rse2,
                     files=self.files,
                     account='root',
                     ignore_availability=True)


    def tearDown(self):
        delete_replicas(rse=self.rse1, files=self.files)
        delete_replicas(rse=self.rse2, files=self.files)
        del_rse(self.rse1)
        del_rse(self.rse2)
Example #20
def test_list_archive_contents_transparently(rse_factory, replica_client, did_client, mock_scope, root_account):
    """ ARCHIVE (CORE): Transparent archive listing """

    rse, rse_id = rse_factory.make_xroot_rse()

    # register archive
    archive = {'scope': mock_scope, 'name': 'weighted.storage.cube.zip', 'type': 'FILE',
               'bytes': 2596, 'adler32': 'beefdead'}
    archive_client = archive.copy()
    archive_client['scope'] = archive_client['scope'].external

    add_replicas(rse_id=rse_id, files=[archive], account=root_account)

    # archived files with replicas
    files_with_replicas = [{'scope': mock_scope, 'name': 'witrep-%i-%s' % (i, str(generate_uuid())), 'type': 'FILE',
                            'bytes': 1234, 'adler32': 'deadbeef'} for i in range(2)]
    files_with_replicas_client = []
    for f in files_with_replicas:
        new_file = f.copy()
        new_file['scope'] = new_file['scope'].external
        files_with_replicas_client.append(new_file)

    add_replicas(rse_id=rse_id, files=files_with_replicas, account=root_account)
    did_client.add_files_to_archive(scope=archive_client['scope'], name=archive_client['name'], files=files_with_replicas_client)

    res = [r['pfns'] for r in replica_client.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']} for f in files_with_replicas_client],
                                                           resolve_archives=True)]
    assert len(res) == 2
    assert len(res[0]) == 2
    assert len(res[1]) == 2
    for r in res:
        for p in r:
            if r[p]['domain'] == 'zip':
                assert 'weighted.storage.cube.zip?xrdcl.unzip=witrep-' in p
            else:
                assert 'weighted.storage.cube.zip?xrdcl.unzip=witrep-' not in p

    # archived files without replicas
    files = [{'scope': mock_scope.external, 'name': 'norep-%i-%s' % (i, str(generate_uuid())), 'type': 'FILE',
              'bytes': 1234, 'adler32': 'deadbeef'} for i in range(2)]
    did_client.add_files_to_archive(scope=archive_client['scope'], name=archive_client['name'], files=files)
    res = [r['pfns'] for r in replica_client.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']} for f in files],
                                                           resolve_archives=True)]
    assert len(res) == 2
    for r in res:
        assert 'weighted.storage.cube.zip?xrdcl.unzip=norep-' in list(r.keys())[0]
Example #21
    def setUp(self):
        if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
            self.vo = {'vo': get_vo()}
        else:
            self.vo = {}

        self.root = InternalAccount('root', **self.vo)

        # add an S3 storage with a replica
        self.rc = client.ReplicaClient()
        self.rses3 = rse_name_generator()
        self.rses3_id = add_rse(self.rses3, **self.vo)
        add_protocol(self.rses3_id, {'scheme': 'https',
                                     'hostname': 'fake-rucio.s3-eu-south-8.amazonaws.com',
                                     'port': 443,
                                     'prefix': '/',
                                     'impl': 'rucio.rse.protocols.gfal.NoRename',
                                     'domains': {
                                         'lan': {'read': 1, 'write': 1, 'delete': 1},
                                         'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})
        add_rse_attribute(rse_id=self.rses3_id, key='sign_url', value='s3')
        add_rse_attribute(rse_id=self.rses3_id, key='fts', value='localhost')
        self.files3 = [{'scope': InternalScope('mock', **self.vo), 'name': 'file-on-aws',
                        'bytes': 1234, 'adler32': 'deadbeef', 'meta': {'events': 123}}]
        add_replicas(rse_id=self.rses3_id, files=self.files3, account=self.root)

        # add a non-S3 storage with a replica
        self.rsenons3 = rse_name_generator()
        self.rsenons3_id = add_rse(self.rsenons3, **self.vo)
        add_protocol(self.rsenons3_id, {'scheme': 'https',
                                        'hostname': 'somestorage.ch',
                                        'port': 1094,
                                        'prefix': '/my/prefix',
                                        'impl': 'rucio.rse.protocols.gfal.Default',
                                        'domains': {
                                            'lan': {'read': 1, 'write': 1, 'delete': 1},
                                            'wan': {'read': 1, 'write': 1, 'delete': 1, 'third_party_copy': 1}}})
        add_rse_attribute(rse_id=self.rsenons3_id, key='fts', value='localhost')
        self.filenons3 = [{'scope': InternalScope('mock', **self.vo), 'name': 'file-on-storage',
                           'bytes': 1234, 'adler32': 'deadbeef', 'meta': {'events': 321}}]
        add_replicas(rse_id=self.rsenons3_id, files=self.filenons3, account=self.root)

        # set the distance both ways
        add_distance(self.rses3_id, self.rsenons3_id, ranking=1, agis_distance=1, geoip_distance=1)
        add_distance(self.rsenons3_id, self.rses3_id, ranking=1, agis_distance=1, geoip_distance=1)
Example #22
def add_replicas(rse, files, issuer, ignore_availability=False):
    """
    Bulk add file replicas.

    :param rse: The RSE name.
    :param files: The list of files.
    :param issuer: The issuer account.
    :param ignore_availability: Ignore the RSE blacklisting.

    :returns: True if successful, False otherwise
    """
    validate_schema(name='dids', obj=files)

    kwargs = {'rse': rse}
    if not permission.has_permission(issuer=issuer, action='add_replicas', kwargs=kwargs):
        raise exception.AccessDenied('Account %s can not add file replicas on %s' % (issuer, rse))
    if not permission.has_permission(issuer=issuer, action='skip_availability_check', kwargs=kwargs):
        ignore_availability = False
    replica.add_replicas(rse=rse, files=files, account=issuer, ignore_availability=ignore_availability)
Example #23
 def test_get_did_from_pfns_nondeterministic(self):
     """ REPLICA (CLIENT): Get list of DIDs associated to PFNs for non-deterministic sites"""
     rse = 'MOCK2'
     tmp_scope = 'mock'
     nbfiles = 3
     pfns = []
     input = {}
     rse_info = rsemgr.get_rse_info(rse)
     assert_equal(rse_info['deterministic'], False)
     files = [{
         'scope': tmp_scope,
         'name': 'file_%s' % generate_uuid(),
         'bytes': 1,
         'adler32': '0cc737eb',
         'pfn': 'srm://mock2.com:8443/srm/managerv2?SFN=/rucio/tmpdisk/rucio_tests/%s/%s' % (tmp_scope, generate_uuid()),
         'meta': {
             'events': 10
         }
     } for _ in range(nbfiles)]
     for f in files:
         input[f['pfn']] = {'scope': f['scope'], 'name': f['name']}
     add_replicas(rse=rse,
                  files=files,
                  account='root',
                  ignore_availability=True)
     for replica in list_replicas(dids=[{
             'scope': f['scope'],
             'name': f['name'],
             'type': DIDType.FILE
     } for f in files],
                                  schemes=['srm'],
                                  ignore_availability=True):
         for rse in replica['rses']:
             pfns.extend(replica['rses'][rse])
     for result in self.replica_client.get_did_from_pfns(pfns, rse):
         pfn = list(result.keys())[0]
         assert_equal(input[pfn], list(result.values())[0])
Example #24
    def test_list_replicas_all_states(self):
        """ REPLICA (CORE): list file replicas with all_states"""
        tmp_scope = 'mock'
        nbfiles = 13
        files = [{'scope': tmp_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for i in range(nbfiles)]
        rses = ['MOCK', 'MOCK3']
        for rse in rses:
            add_replicas(rse=rse, files=files, account='root', ignore_availability=True)

        for file in files:
            update_replica_state('MOCK', tmp_scope, file['name'], ReplicaState.COPYING)

        replica_cpt = 0
        for replica in list_replicas(dids=[{'scope': f['scope'], 'name': f['name'], 'type': DIDType.FILE} for f in files], schemes=['srm'], all_states=True):
            assert_in('states', replica)
            assert_equal(replica['states']['MOCK'], str(ReplicaState.COPYING))
            assert_equal(replica['states']['MOCK3'], str(ReplicaState.AVAILABLE))
            replica_cpt += 1

        assert_equal(nbfiles, replica_cpt)
Example #25
    def setup(self):
        """RucioCache (Func): Find necessary rse and dids """
        self.id = int(random.random() * 10000)
        self.rse_exist_volatile = 'RUCIO_CACHE_VOLATILE' + str(self.id)
        try:
            rse.add_rse(self.rse_exist_volatile, 'root', deterministic=True, volatile=True)
        except exception.Duplicate:
            logging.warning("rse RUCIO_CACHE_VOLATILE already there")

        self.rse_exist_novolatile = 'RUCIO_CACHE_NOVOLATILE' + str(self.id)
        try:
            rse.add_rse(self.rse_exist_novolatile, 'root', deterministic=True, volatile=False)
        except exception.Duplicate:
            logging.warning("rse RUCIO_CACHE_NOVOLATILE already there")

        self.rse_noExist = 'RUCIO_CACHE_NOEXIST' + str(self.id)
        dids = did.list_dids(scope='mock', filters={}, type='file')
        i = 0
        self.files_exist = []
        self.files_exist_wrong_meta = []
        self.file_replica_on_novolatile = []
        for _did in dids:
            if i < 2:
                i += 1
                meta = did.get_metadata(scope='mock', name=_did[0])
                self.files_exist.append({'scope': meta['scope'], 'name': meta['name'], 'bytes': meta['bytes'], "adler32": meta["adler32"]})
                self.files_exist_wrong_meta.append({'scope': meta['scope'], 'name': meta['name'], 'bytes': 12345678, "adler32": '12345678'})
            elif i < 3:
                meta = did.get_metadata(scope='mock', name=_did[0])
                file = {'scope': meta['scope'], 'name': meta['name'], 'bytes': meta['bytes'], "adler32": meta["adler32"]}
                self.file_replica_on_novolatile.append(file)
                replica.add_replicas(self.rse_exist_novolatile, [file], account='root')

        logging.debug("File Exists: %s " % self.files_exist)
        logging.debug("File Exists with wrong metadata: %s " % self.files_exist_wrong_meta)
        logging.debug("File Exists on non-volatile rses: %s " % self.file_replica_on_novolatile)

        self.files_noExist = [{'scope': 'mock', 'name': 'file_notexist', "bytes": 1, "adler32": "0cc737eb"}]
        logging.debug("File not Exists: %s " % self.files_noExist)
        self.account = 'root'
        self.lifetime = 2
Example #26
def mock_request(db_session, vo, source_rse, dest_rse, file):
    account = InternalAccount('root', vo=vo)

    add_replicas(rse_id=source_rse['id'],
                 files=[file],
                 account=account,
                 session=db_session)

    request = models.Request(state=RequestState.PREPARING,
                             scope=file['scope'],
                             name=file['name'],
                             dest_rse_id=dest_rse['id'],
                             account=account)
    request.save(session=db_session)
    db_session.commit()

    yield request

    request.delete(session=db_session)
    delete_replicas(rse_id=source_rse['id'], files=[file], session=db_session)
    db_session.commit()
Example #27
def test_list_archive_contents_at_rse(rse_factory, mock_scope, root_account, did_client, replica_client):
    """ ARCHIVE (CORE): Transparent archive listing at RSE """

    rse1, rse1_id = rse_factory.make_xroot_rse()
    rse2, rse2_id = rse_factory.make_xroot_rse()
    # register archive
    archive1 = {'scope': mock_scope, 'name': 'cube.1.zip', 'type': 'FILE', 'bytes': 2596, 'adler32': 'beefdead'}
    archive2 = {'scope': mock_scope, 'name': 'cube.2.zip', 'type': 'FILE', 'bytes': 5432, 'adler32': 'deadbeef'}
    add_replicas(rse_id=rse1_id, files=[archive1], account=root_account)
    add_replicas(rse_id=rse2_id, files=[archive2], account=root_account)

    # archived files with replicas
    archived_file = [{'scope': mock_scope.external, 'name': 'zippedfile-%i-%s' % (i, str(generate_uuid())), 'type': 'FILE',
                      'bytes': 4322, 'adler32': 'beefbeef'} for i in range(2)]
    did_client.add_files_to_archive(scope=mock_scope.external, name=archive1['name'], files=archived_file)
    did_client.add_files_to_archive(scope=mock_scope.external, name=archive2['name'], files=archived_file)

    res = [r['pfns'] for r in replica_client.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']} for f in archived_file],
                                                           rse_expression=rse1,
                                                           resolve_archives=True)]

    res = replica_client.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']} for f in archived_file], metalink=True, rse_expression=rse1, resolve_archives=True)
    assert rse1 in res
    assert rse2 not in res

    res = replica_client.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']} for f in archived_file], metalink=True, rse_expression=rse2, resolve_archives=True)
    assert rse1 not in res
    assert rse2 in res

    # if archive file is on a blacklisted RSE, it must not be listed
    both_rses = rse1 + '|' + rse2
    update_rse(rse1_id, {'availability_read': False})
    update_rse(rse2_id, {'availability_read': False})
    res = replica_client.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']} for f in archived_file], metalink=True, rse_expression=both_rses, resolve_archives=True)
    assert rse1 not in res
    assert rse2 not in res
    res = replica_client.list_replicas(dids=[{'scope': f['scope'], 'name': f['name']} for f in archived_file], metalink=True, rse_expression=both_rses, resolve_archives=True, unavailable=True)
    assert rse1 in res
    assert rse2 in res
Example #28
def test_fk_error_on_source_creation(rse_factory, did_factory, root_account):
    """
    verify that ensure_db_sources correctly handles foreign key errors while creating sources
    """

    if get_session().bind.dialect.name == 'sqlite':
        pytest.skip('Will not run on sqlite')

    src_rse, src_rse_id = rse_factory.make_mock_rse()
    dst_rse, dst_rse_id = rse_factory.make_mock_rse()
    add_distance(src_rse_id, dst_rse_id, ranking=10)

    did = did_factory.random_did()
    file = {
        'scope': did['scope'],
        'name': did['name'],
        'type': 'FILE',
        'bytes': 1,
        'adler32': 'beefdead'
    }
    add_replicas(rse_id=src_rse_id, files=[file], account=root_account)
    rule_core.add_rule(dids=[did],
                       account=root_account,
                       copies=1,
                       rse_expression=dst_rse,
                       grouping='ALL',
                       weight=None,
                       lifetime=None,
                       locked=False,
                       subscription_id=None)

    requests, *_ = get_transfer_paths(rses=[src_rse_id, dst_rse_id])
    request_id, [transfer_path] = next(iter(requests.items()))

    transfer_path[0].rws.request_id = generate_uuid()
    to_submit, *_ = assign_paths_to_transfertool_and_create_hops(requests)
    assert not to_submit
Example #29
    def test_delete_replicas(self):
        """ REPLICA (CORE): Delete replicas """
        tmp_scope = 'mock'
        nbfiles = 5
        files1 = [{
            'scope': tmp_scope,
            'name': 'file_%s' % generate_uuid(),
            'bytes': 1,
            'adler32': '0cc737eb',
            'meta': {
                'events': 10
            }
        } for _ in range(nbfiles)]
        add_replicas(rse='MOCK',
                     files=files1,
                     account='root',
                     ignore_availability=True)

        files2 = [{
            'scope': tmp_scope,
            'name': 'file_%s' % generate_uuid(),
            'bytes': 1,
            'adler32': '0cc737eb',
            'meta': {
                'events': 10
            }
        } for _ in range(nbfiles)]
        add_replicas(rse='MOCK',
                     files=files2,
                     account='root',
                     ignore_availability=True)
        add_replicas(rse='MOCK3',
                     files=files2,
                     account='root',
                     ignore_availability=True)

        delete_replicas(rse='MOCK', files=files1 + files2)

        for file in files1:
            with assert_raises(DataIdentifierNotFound):
                print(get_did(scope=file['scope'], name=file['name']))

        for file in files2:
            get_did(scope=file['scope'], name=file['name'])
Example #30
        add_rse_attribute(rse=self.rse_with_proxy,
                          key='site',
                          value='APERTURE')

        self.files = [{
            'scope': 'mock',
            'name': 'half-life_%s' % i,
            'bytes': 1234,
            'adler32': 'deadbeef',
            'meta': {
                'events': 666
            }
        } for i in range(1, 4)]
        for rse in [self.rse_with_proxy, self.rse_without_proxy]:
            add_replicas(rse=rse,
                         files=self.files,
                         account='root',
                         ignore_availability=True)

        add_protocol(
            self.rse_without_proxy, {
                'scheme': 'root',
                'hostname': 'root.blackmesa.com',
                'port': 1409,
                'prefix': '//training/facility/',
                'impl': 'rucio.rse.protocols.xrootd.Default',
                'domains': {
                    'lan': {
                        'read': 1,
                        'write': 1,
                        'delete': 1
                    },
Example #31
def add_files(lfns, account, ignore_availability, session=None):
    """
    Bulk add files:
    - Create the file and replica.
    - If the dataset containing the file does not exist, create it together with a rule on the dataset on ANY sites.
    - Create all the ascendants of the dataset if they do not exist.

    :param lfns: List of lfns (dictionaries {'lfn': <lfn>, 'rse': <rse>, 'bytes': <bytes>, 'adler32': <adler32>, 'guid': <guid>, 'pfn': <pfn>}).
    :param account: The account submitting the files.
    :param ignore_availability: A boolean to ignore blocklisted sites.
    :param session: The database session in use.
    """
    attachments = []
    # The list of scopes is necessary for the extract_scope
    scopes = list_scopes(session=session)
    scopes = [scope.external for scope in scopes]
    exist_lfn = []
    for lfn in lfns:
        # First check if the file exists
        filename = lfn['lfn']
        lfn_scope, _ = extract_scope(filename, scopes)
        lfn_scope = InternalScope(lfn_scope)

        exists, did_type = _exists(lfn_scope, filename)
        if exists:
            continue

        # Get all the ascendants of the file
        lfn_split = filename.split('/')
        lpns = ["/".join(lfn_split[:idx]) for idx in range(2, len(lfn_split))]
        lpns.reverse()
        print(lpns)

        # The parent must be a dataset. Register it as well as the rule
        dsn_name = lpns[0]
        dsn_scope, _ = extract_scope(dsn_name, scopes)
        dsn_scope = InternalScope(dsn_scope)
        exists, did_type = _exists(dsn_scope, dsn_name)
        if exists and did_type == DIDType.CONTAINER:
            raise UnsupportedOperation('Cannot create %s as dataset' %
                                       dsn_name)
        if (dsn_name not in exist_lfn) and not exists:
            print('Will create %s' % dsn_name)
            add_did(scope=dsn_scope,
                    name=dsn_name,
                    type=DIDType.DATASET,
                    account=InternalAccount(account),
                    statuses=None,
                    meta=None,
                    rules=[{
                        'copies': 1,
                        'rse_expression': 'ANY=true',
                        'weight': None,
                        'account': InternalAccount(account),
                        'lifetime': None,
                        'grouping': 'NONE'
                    }],
                    lifetime=None,
                    dids=None,
                    rse_id=None,
                    session=session)
            exist_lfn.append(dsn_name)
            parent_name = lpns[1]
            parent_scope, _ = extract_scope(parent_name, scopes)
            parent_scope = InternalScope(parent_scope)
            attachments.append({
                'scope': parent_scope,
                'name': parent_name,
                'dids': [{
                    'scope': dsn_scope,
                    'name': dsn_name
                }]
            })

        # Register the file
        rse_id = lfn.get('rse_id', None)
        if not rse_id:
            raise InvalidType('Missing rse_id')
        bytes = lfn.get('bytes', None)
        guid = lfn.get('guid', None)
        adler32 = lfn.get('adler32', None)
        pfn = lfn.get('pfn', None)
        files = {
            'scope': lfn_scope,
            'name': filename,
            'bytes': bytes,
            'adler32': adler32
        }
        if pfn:
            files['pfn'] = str(pfn)
        if guid:
            files['meta'] = {'guid': guid}
        add_replicas(rse_id=rse_id,
                     files=[files],
                     dataset_meta=None,
                     account=InternalAccount(account),
                     ignore_availability=ignore_availability,
                     session=session)
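        # short-lived replication rule (lifetime 86400 s = 1 day) on the RSE the file was injected on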
        add_rule(dids=[{
            'scope': lfn_scope,
            'name': filename
        }],
                 account=InternalAccount(account),
                 copies=1,
                 rse_expression=lfn['rse'],
                 grouping=None,
                 weight=None,
                 lifetime=86400,
                 locked=None,
                 subscription_id=None,
                 session=session)
        attachments.append({
            'scope': dsn_scope,
            'name': dsn_name,
            'dids': [{
                'scope': lfn_scope,
                'name': filename
            }]
        })

        # Now loop over the ascendants of the dataset and create them
        for lpn in lpns[1:]:
            child_scope, _ = extract_scope(lpn, scopes)
            child_scope = InternalScope(child_scope)
            exists, did_type = _exists(child_scope, lpn)
            if exists and did_type == DIDType.DATASET:
                raise UnsupportedOperation('Cannot create %s as container' %
                                           lpn)
            if (lpn not in exist_lfn) and not exists:
                print('Will create %s' % lpn)
                add_did(scope=child_scope,
                        name=lpn,
                        type=DIDType.CONTAINER,
                        account=InternalAccount(account),
                        statuses=None,
                        meta=None,
                        rules=None,
                        lifetime=None,
                        dids=None,
                        rse_id=None,
                        session=session)
                exist_lfn.append(lpn)
                parent_name = lpns[lpns.index(lpn) + 1]
                parent_scope, _ = extract_scope(parent_name, scopes)
                parent_scope = InternalScope(parent_scope)
                attachments.append({
                    'scope': parent_scope,
                    'name': parent_name,
                    'dids': [{
                        'scope': child_scope,
                        'name': lpn
                    }]
                })
    # Finally attach everything
    attach_dids_to_dids(attachments,
                        account=InternalAccount(account),
                        ignore_duplicate=True,
                        session=session)
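
A minimal usage sketch for add_files, not part of the original snippet: it assumes the same server-side imports (get_rse_id, generate_uuid) and an open database session, and the LFN, RSE name, checksum and account values are purely illustrative.

lfns = [{
    'lfn': '/store/data/run_001/file_abc.root',               # hypothetical LFN
    'rse': 'MOCK',                                             # hypothetical RSE name
    'rse_id': get_rse_id(rse='MOCK', session=session),
    'bytes': 1234,
    'adler32': '0cc737eb',
    'guid': generate_uuid(),
    'pfn': None
}]
add_files(lfns=lfns, account='root', ignore_availability=True, session=session)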
Example #32
class TestReplicaCore:

    def test_update_replicas_paths(self):
        """ REPLICA (CORE): Force update the replica path """
        tmp_scope = 'mock'
        nbfiles = 5
        rse_info = rsemgr.get_rse_info('MOCK')
        files = [{'scope': tmp_scope,
                  'name': 'file_%s' % generate_uuid(),
                  'pfn': 'srm://mock2.com:8443/srm/managerv2?SFN=/rucio/tmpdisk/rucio_tests//does/not/really/matter/where',
                  'bytes': 1,
                  'adler32': '0cc737eb',
                  'meta': {'events': 10},
                  'rse_id': rse_info['id'],
                  'path': '/does/not/really/matter/where'} for i in range(nbfiles)]
        add_replicas(rse='MOCK2', files=files, account='root', ignore_availability=True)
        update_replicas_paths(files)
        for replica in list_replicas(dids=[{'scope': f['scope'],
                                            'name': f['name'],
                                            'type': DIDType.FILE} for f in files],
                                     schemes=['srm']):
            # force the changed string - if we look it up from the DB, then we're not testing anything :-D
            assert replica['rses']['MOCK2'][0] == 'srm://mock2.com:8443/srm/managerv2?SFN=/rucio/tmpdisk/rucio_tests//does/not/really/matter/where'

    def test_add_list_bad_replicas(self):
        """ REPLICA (CORE): Add bad replicas and list them"""
        tmp_scope = 'mock'
        nbfiles = 5
        # Adding replicas to deterministic RSE
        files = [{'scope': tmp_scope, 'name': 'file_%s' % generate_uuid(), 'bytes': 1, 'adler32': '0cc737eb', 'meta': {'events': 10}} for i in range(nbfiles)]
        rse_info = rsemgr.get_rse_info('MOCK')
Example #33
def protocols_setup(vo):
    rse_info = copy.deepcopy(base_rse_info)

    files = [{
        'scope': InternalScope('mock', vo=vo),
        'name': 'element_0',
        'bytes': 1234,
        'adler32': 'deadbeef'
    }]
    root = InternalAccount('root', vo=vo)

    for idx in range(len(rse_info)):
        rse_info[idx]['name'] = '%s_%s' % (rse_info[idx]['site'],
                                           rse_name_generator())
        rse_info[idx]['id'] = add_rse(rse_info[idx]['name'], vo=vo)
        add_rse_attribute(rse_id=rse_info[idx]['id'],
                          key='site',
                          value=base_rse_info[idx]['site'])
        add_replicas(rse_id=rse_info[idx]['id'], files=files, account=root)

    # invalidate cache for parse_expression('site=…')
    rse_expression_parser.REGION.invalidate()

    # check sites
    for idx in range(len(rse_info)):
        site_rses = rse_expression_parser.parse_expression(
            'site=' + base_rse_info[idx]['site'])
        assert len(site_rses) > 0
        assert rse_info[idx]['id'] in [rse['id'] for rse in site_rses]
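
    # The protocol priorities below are per domain and operation: 1 is the most
    # preferred, larger numbers rank lower, and 0 disables that operation for the
    # protocol in the given domain.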

    add_protocol(
        rse_info[0]['id'], {
            'scheme': schemes[0],
            'hostname': ('root.%s' % base_rse_info[0]['address']),
            'port': 1409,
            'prefix': '//test/chamber/',
            'impl': 'rucio.rse.protocols.xrootd.Default',
            'domains': {
                'lan': {
                    'read': 1,
                    'write': 1,
                    'delete': 1
                },
                'wan': {
                    'read': 1,
                    'write': 1,
                    'delete': 1
                }
            }
        })
    add_protocol(
        rse_info[0]['id'], {
            'scheme': schemes[2],
            'hostname': ('davs.%s' % base_rse_info[0]['address']),
            'port': 443,
            'prefix': '/test/chamber/',
            'impl': 'rucio.rse.protocols.gfal.Default',
            'domains': {
                'lan': {
                    'read': 2,
                    'write': 2,
                    'delete': 2
                },
                'wan': {
                    'read': 2,
                    'write': 2,
                    'delete': 2
                }
            }
        })
    add_protocol(
        rse_info[0]['id'], {
            'scheme': schemes[1],
            'hostname': ('gsiftp.%s' % base_rse_info[0]['address']),
            'port': 8446,
            'prefix': '/test/chamber/',
            'impl': 'rucio.rse.protocols.gfal.Default',
            'domains': {
                'lan': {
                    'read': 0,
                    'write': 0,
                    'delete': 0
                },
                'wan': {
                    'read': 3,
                    'write': 3,
                    'delete': 3
                }
            }
        })

    add_protocol(
        rse_info[1]['id'], {
            'scheme': schemes[1],
            'hostname': ('gsiftp.%s' % base_rse_info[1]['address']),
            'port': 8446,
            'prefix': '/lambda/complex/',
            'impl': 'rucio.rse.protocols.gfal.Default',
            'domains': {
                'lan': {
                    'read': 2,
                    'write': 2,
                    'delete': 2
                },
                'wan': {
                    'read': 1,
                    'write': 1,
                    'delete': 1
                }
            }
        })
    add_protocol(
        rse_info[1]['id'], {
            'scheme': schemes[2],
            'hostname': ('davs.%s' % base_rse_info[1]['address']),
            'port': 443,
            'prefix': '/lambda/complex/',
            'impl': 'rucio.rse.protocols.gfal.Default',
            'domains': {
                'lan': {
                    'read': 0,
                    'write': 0,
                    'delete': 0
                },
                'wan': {
                    'read': 2,
                    'write': 2,
                    'delete': 2
                }
            }
        })
    add_protocol(
        rse_info[1]['id'], {
            'scheme': schemes[0],
            'hostname': ('root.%s' % base_rse_info[1]['address']),
            'port': 1409,
            'prefix': '//lambda/complex/',
            'impl': 'rucio.rse.protocols.xrootd.Default',
            'domains': {
                'lan': {
                    'read': 1,
                    'write': 1,
                    'delete': 1
                },
                'wan': {
                    'read': 3,
                    'write': 3,
                    'delete': 3
                }
            }
        })

    yield {'files': files, 'rse_info': rse_info}

    for info in rse_info:
        delete_replicas(rse_id=info['id'], files=files)
        del_rse_attribute(rse_id=info['id'], key='site')
        del_rse(info['id'])
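
A minimal sketch of how this generator could be wired up and consumed in a pytest test, not part of the original snippet: the vo fixture and the test body are assumptions, while the yielded 'files' and 'rse_info' keys come from the code above.

import pytest

@pytest.fixture
def protocols(vo):
    # delegate setup (before the test) and teardown (after it) to protocols_setup
    yield from protocols_setup(vo)

def test_protocols_setup(protocols):
    # the generator yields the registered file and the configured RSE descriptions
    assert protocols['files'][0]['name'] == 'element_0'
    assert len(protocols['rse_info']) >= 2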