Example #1
def test_multihop_requests_created(rse_factory, did_factory, root_account,
                                   core_config_mock, caches_mock):
    """
    Ensure that multihop transfers are handled and the intermediate request is correctly created
    """
    rs0_name, src_rse_id = rse_factory.make_posix_rse()
    _, intermediate_rse_id = rse_factory.make_posix_rse()
    dst_rse_name, dst_rse_id = rse_factory.make_posix_rse()
    rse_core.add_rse_attribute(intermediate_rse_id, 'available_for_multihop',
                               True)

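    # Only src -> intermediate -> dst distances exist, so the transfer must be a multihop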
    add_distance(src_rse_id, intermediate_rse_id, ranking=10)
    add_distance(intermediate_rse_id, dst_rse_id, ranking=10)

    did = did_factory.upload_test_file(rs0_name)
    rule_core.add_rule(dids=[did],
                       account=root_account,
                       copies=1,
                       rse_expression=dst_rse_name,
                       grouping='ALL',
                       weight=None,
                       lifetime=None,
                       locked=False,
                       subscription_id=None)

    transfers, _reqs_no_source, _reqs_scheme_mismatch, _reqs_only_tape_source = get_transfer_requests_and_source_replicas(
        rses=rse_factory.created_rses)
    assert len(transfers) == 2
    # ensure that one transfer is the parent of the other
    assert any(t for t in transfers.values()
               if t.get('parent_request') in transfers)
    # the intermediate request was correctly created
    assert request_core.get_request_by_did(rse_id=intermediate_rse_id, **did)
Example #2
    def test_judge_add_files_to_dataset(self):
        """ JUDGE EVALUATOR: Test the judge when adding files to dataset"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')

        # Add a first rule to the DS
        add_rule(dids=[{
            'scope': scope,
            'name': dataset
        }],
                 account='jdoe',
                 copies=2,
                 rse_expression=self.T1,
                 grouping='DATASET',
                 weight=None,
                 lifetime=None,
                 locked=False,
                 subscription_id=None)

        attach_dids(scope, dataset, files, 'jdoe')
        re_evaluator(once=True)

        files = create_files(3, scope, self.rse1)
        attach_dids(scope, dataset, files, 'jdoe')

        # Fake judge
        re_evaluator(once=True)

        # Check if the Locks are created properly
        for file in files:
            assert (len(
                get_replica_locks(scope=file['scope'],
                                  name=file['name'])) == 2)
Example #3
def test_multihop_requests_created(rse_factory, did_factory, root_account,
                                   core_config_mock, caches_mock):
    """
    Ensure that multihop transfers are handled and the intermediate request is correctly created
    """
    rs0_name, src_rse_id = rse_factory.make_posix_rse()
    _, intermediate_rse_id = rse_factory.make_posix_rse()
    dst_rse_name, dst_rse_id = rse_factory.make_posix_rse()
    rse_core.add_rse_attribute(intermediate_rse_id, 'available_for_multihop',
                               True)

    add_distance(src_rse_id, intermediate_rse_id, ranking=10)
    add_distance(intermediate_rse_id, dst_rse_id, ranking=10)

    did = did_factory.upload_test_file(rs0_name)
    rule_core.add_rule(dids=[did],
                       account=root_account,
                       copies=1,
                       rse_expression=dst_rse_name,
                       grouping='ALL',
                       weight=None,
                       lifetime=None,
                       locked=False,
                       subscription_id=None)

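    # next_transfers_to_submit() groups candidate transfer paths by transfertool; expect one group with one multihop path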
    [[_, [transfer]]] = next_transfers_to_submit(rses=rse_factory.created_rses).items()
    # the intermediate request was correctly created
    assert request_core.get_request_by_did(rse_id=intermediate_rse_id, **did)
Example #4
    def test_judge_evaluate_detach_datasetlock(self):
        """ JUDGE EVALUATOR: Test if the a datasetlock is detached correctly when removing a dataset from a container"""
        re_evaluator(once=True)

        scope = 'mock'
        files = create_files(3, scope, self.rse1, bytes=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        container = 'container_' + str(uuid())
        add_did(scope, container, DIDType.from_sym('CONTAINER'), 'jdoe')
        attach_dids(scope, container, [{'scope': scope, 'name': dataset}], 'jdoe')

        # Add a rule to the Container
        add_rule(dids=[{'scope': scope, 'name': container}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)

        # Check if the datasetlock is there
        locks = [ds_lock for ds_lock in get_dataset_locks(scope=scope, name=dataset)]
        assert(len(locks) > 0)

        detach_dids(scope, container, [{'scope': scope, 'name': dataset}])

        # Fake judge
        re_evaluator(once=True)

        locks = [ds_lock for ds_lock in get_dataset_locks(scope=scope, name=dataset)]
        assert(len(locks) == 0)
Example #5
    def test_account_counter_judge_evaluate_detach(self):
        """ JUDGE EVALUATOR: Test if the account counter is updated correctly when a file is removed from a DS"""
        re_evaluator(once=True)
        account_update(once=True)

        scope = 'mock'
        files = create_files(3, scope, self.rse1, bytes=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        # Add a first rule to the DS
        add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)

        account_update(once=True)

        account_counter_before = get_counter(self.rse1_id, 'jdoe')

        detach_dids(scope, dataset, [files[0]])

        # Fake judge
        re_evaluator(once=True)
        account_update(once=True)

        account_counter_after = get_counter(self.rse1_id, 'jdoe')
        assert(account_counter_before['bytes'] - 100 == account_counter_after['bytes'])
        assert(account_counter_before['files'] - 1 == account_counter_after['files'])
Example #6
    def test_repair_a_rule_with_source_replica_expression(self):
        """ JUDGE EVALUATOR: Test the judge when a with two rules with source_replica_expression"""
        scope = 'mock'
        files = create_files(3, scope, self.rse4)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        # Add a first rule to the DS
        rule_id1 = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]
        rule_id2 = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse3, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None, source_replica_expression=self.rse1)[0]

        assert(RuleState.REPLICATING == get_rule(rule_id1)['state'])
        assert(RuleState.STUCK == get_rule(rule_id2)['state'])

        successful_transfer(scope=scope, name=files[0]['name'], rse_id=self.rse1_id, nowait=False)
        successful_transfer(scope=scope, name=files[1]['name'], rse_id=self.rse1_id, nowait=False)
        successful_transfer(scope=scope, name=files[2]['name'], rse_id=self.rse1_id, nowait=False)
        # Also make replicas AVAILABLE
        session = get_session()
        replica = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=files[0]['name'], rse_id=self.rse1_id).one()
        replica.state = ReplicaState.AVAILABLE
        replica = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=files[1]['name'], rse_id=self.rse1_id).one()
        replica.state = ReplicaState.AVAILABLE
        replica = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=files[2]['name'], rse_id=self.rse1_id).one()
        replica.state = ReplicaState.AVAILABLE
        session.commit()

        rule_repairer(once=True)

        assert(RuleState.OK == get_rule(rule_id1)['state'])
        assert(RuleState.REPLICATING == get_rule(rule_id2)['state'])
Example #7
def test_multihop_concurrent_submitters(rse_factory, did_factory, root_account, core_config_mock, caches_mock):
    """
    Ensure that multiple concurrent submitters on the same multi-hop don't result in an undesired database state
    """
    src_rse, src_rse_id = rse_factory.make_posix_rse()
    jump_rse, jump_rse_id = rse_factory.make_posix_rse()
    dst_rse, dst_rse_id = rse_factory.make_posix_rse()
    rse_core.add_rse_attribute(jump_rse_id, 'available_for_multihop', True)

    add_distance(src_rse_id, jump_rse_id, ranking=10)
    add_distance(jump_rse_id, dst_rse_id, ranking=10)

    did = did_factory.upload_test_file(src_rse)
    rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)

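    # Run next_transfers_to_submit concurrently from many threads; the resulting requests must stay consistent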
    nb_threads = 9
    nb_executions = 18
    with ThreadPoolExecutor(max_workers=nb_threads) as executor:
        futures = [executor.submit(next_transfers_to_submit, rses=rse_factory.created_rses) for _ in range(nb_executions)]
        for f in futures:
            try:
                f.result()
            except Exception:
                pass

    jmp_request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)
    dst_request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)
    assert jmp_request['state'] == dst_request['state'] == RequestState.QUEUED
    assert jmp_request['attributes']['source_replica_expression'] == src_rse
    assert jmp_request['attributes']['initial_request_id'] == dst_request['id']
    assert jmp_request['attributes']['next_hop_request_id'] == dst_request['id']
Example #8
    def test_list_rules_states(self):
        """ SUBSCRIPTION (API): Test listing of rule states for subscription """
        tmp_scope = 'mock_' + uuid()[:8]
        add_scope(tmp_scope, 'root')
        site_a = 'RSE%s' % uuid().upper()
        site_b = 'RSE%s' % uuid().upper()

        add_rse(site_a)
        add_rse(site_b)

        # add a new dataset
        dsn = 'dataset-%s' % uuid()
        add_did(scope=tmp_scope, name=dsn,
                type=DIDType.DATASET, account='root')

        subscription_name = uuid()
        add_subscription(name=subscription_name, account='root', filter={'account': 'root'},
                         replication_rules=[(1, 'T1_DATADISK', False, True)], lifetime=100000, retroactive=0, dry_run=0, comments='This is a comment')

        subscriptions = list_subscriptions(name=subscription_name, account='root')
        # workaround until add_subscription returns the id
        sub_id = None
        for s in subscriptions:
            sub_id = s['id']

        # Add two rules
        add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account='root', copies=1, rse_expression=site_a, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=sub_id)
        add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account='root', copies=1, rse_expression=site_b, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=sub_id)

        for r in list_subscription_rule_states(account='root', name=subscription_name):
            assert_equal(r[3], 2)
Example #9
    def test_judge_evaluate_detach_datasetlock(self):
        """ JUDGE EVALUATOR: Test if a datasetlock is detached correctly when removing a dataset from a container"""
        re_evaluator(once=True, did_limit=1000)

        scope = InternalScope('mock', **self.vo)
        files = create_files(3, scope, self.rse1_id, bytes_=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        container = 'container_' + str(uuid())
        add_did(scope, container, DIDType.CONTAINER, self.jdoe)
        attach_dids(scope, container, [{'scope': scope, 'name': dataset}], self.jdoe)

        # Add a rule to the Container
        add_rule(dids=[{'scope': scope, 'name': container}], account=self.jdoe, copies=1, rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)

        # Check if the datasetlock is there
        locks = [ds_lock for ds_lock in get_dataset_locks(scope=scope, name=dataset)]
        assert(len(locks) > 0)

        detach_dids(scope, container, [{'scope': scope, 'name': dataset}])

        # Fake judge
        re_evaluator(once=True, did_limit=1000)

        locks = [ds_lock for ds_lock in get_dataset_locks(scope=scope, name=dataset)]
        assert(len(locks) == 0)
Example #10
    def test_add_rule_container_dataset_with_weights(self):
        """ REPLICATION RULE (CORE): Add a replication rule on a container, DATASET Grouping, WEIGHTS"""
        scope = 'mock'
        container = 'container_' + str(uuid())
        add_did(scope, container, DIDType.from_sym('CONTAINER'), 'jdoe')
        all_files = []
        dataset_files = []
        for i in range(3):
            files = create_files(3, scope, self.rse1)
            all_files.extend(files)
            dataset = 'dataset_' + str(uuid())
            add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
            attach_dids(scope, dataset, files, 'jdoe')
            attach_dids(scope, container, [{'scope': scope, 'name': dataset}], 'jdoe')
            dataset_files.append({'scope': scope, 'name': dataset, 'files': files})

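        # Rule on the container with DATASET grouping: every file of a dataset must be locked on the same 2 weighted T1 RSEs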
        add_rule(dids=[{'scope': scope, 'name': container}], account='jdoe', copies=2, rse_expression=self.T1, grouping='DATASET', weight='fakeweight', lifetime=None, locked=False, subscription_id=None)

        t1 = set([self.rse1_id, self.rse3_id, self.rse5_id])
        for dataset in dataset_files:
            first_locks = None
            for file in dataset['files']:
                if first_locks is None:
                    first_locks = set([lock['rse_id'] for lock in get_replica_locks(scope=file['scope'], name=file['name'])])
                rse_locks = set([lock['rse_id'] for lock in get_replica_locks(scope=file['scope'], name=file['name'])])
                assert(len(t1.intersection(rse_locks)) == 2)
                assert(len(first_locks.intersection(rse_locks)) == 2)
                assert_in(self.rse1_id, rse_locks)
Example #11
    def test_account_counter_judge_evaluate_detach(self):
        """ JUDGE EVALUATOR: Test if the account counter is updated correctly when a file is removed from a DS"""
        re_evaluator(once=True, did_limit=1000)
        account_update(once=True)

        scope = InternalScope('mock', **self.vo)
        files = create_files(3, scope, self.rse1_id, bytes_=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        # Add a first rule to the DS
        add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=self.rse1, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)

        account_update(once=True)

        account_counter_before = get_usage(self.rse1_id, self.jdoe)

        detach_dids(scope, dataset, [files[0]])

        # Fake judge
        re_evaluator(once=True, did_limit=1000)
        account_update(once=True)

        account_counter_after = get_usage(self.rse1_id, self.jdoe)
        assert(account_counter_before['bytes'] - 100 == account_counter_after['bytes'])
        assert(account_counter_before['files'] - 1 == account_counter_after['files'])
Example #12
    def test_list_rules_states(self):
        """ SUBSCRIPTION (API): Test listing of rule states for subscription """
        tmp_scope = 'mock_' + uuid()[:8]
        add_scope(tmp_scope, 'root')
        site_a = 'RSE%s' % uuid().upper()
        site_b = 'RSE%s' % uuid().upper()

        add_rse(site_a)
        add_rse(site_b)

        # Add quota
        set_account_limit('root', get_rse_id(site_a), -1)
        set_account_limit('root', get_rse_id(site_b), -1)

        # add a new dataset
        dsn = 'dataset-%s' % uuid()
        add_did(scope=tmp_scope, name=dsn,
                type=DIDType.DATASET, account='root')

        subscription_name = uuid()
        subid = add_subscription(name=subscription_name, account='root', filter={'account': ['root', ], 'scope': [tmp_scope, ]},
                                 replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}], lifetime=100000, retroactive=0, dry_run=0, comments='This is a comment', issuer='root')

        # Add two rules
        add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account='root', copies=1, rse_expression=site_a, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=subid)
        add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account='root', copies=1, rse_expression=site_b, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=subid)

        for rule in list_subscription_rule_states(account='root', name=subscription_name):
            assert_equal(rule[3], 2)
Example #13
    def test_judge_add_files_to_dataset_rule_on_container(self):
        """ JUDGE EVALUATOR: Test the judge when attaching file to dataset with rule on two levels of containers"""
        scope = InternalScope('mock', **self.vo)
        files = create_files(3, scope, self.rse1_id)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        parent_container = 'container_' + str(uuid())
        add_did(scope, parent_container, DIDType.CONTAINER, self.jdoe)
        attach_dids(scope, parent_container, [{'scope': scope, 'name': dataset}], self.jdoe)

        parent_parent_container = 'container_' + str(uuid())
        add_did(scope, parent_parent_container, DIDType.CONTAINER, self.jdoe)
        attach_dids(scope, parent_parent_container, [{'scope': scope, 'name': parent_container}], self.jdoe)

        # Add a first rule to the DS
        add_rule(dids=[{'scope': scope, 'name': parent_parent_container}], account=self.jdoe, copies=2, rse_expression=self.T1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)

        # Fake judge
        re_evaluator(once=True, did_limit=1000)

        # Check if the Locks are created properly
        for file in files:
            assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)

        # create more files and attach them
        more_files = create_files(3, scope, self.rse1_id)
        attach_dids(scope, dataset, more_files, self.jdoe)
        re_evaluator(once=True, did_limit=1000)
        # Check if the Locks are created properly
        for file in more_files:
            assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)
Example #14
    def test_repair_a_rule_with_source_replica_expression(self):
        """ JUDGE EVALUATOR: Test the judge when a with two rules with source_replica_expression"""
        scope = InternalScope('mock', **self.vo)
        files = create_files(3, scope, self.rse4_id)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        # Add a first rule to the DS
        rule_id1 = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]
        rule_id2 = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=self.rse3, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None, source_replica_expression=self.rse1)[0]

        assert(RuleState.REPLICATING == get_rule(rule_id1)['state'])
        assert(RuleState.STUCK == get_rule(rule_id2)['state'])

        successful_transfer(scope=scope, name=files[0]['name'], rse_id=self.rse1_id, nowait=False)
        successful_transfer(scope=scope, name=files[1]['name'], rse_id=self.rse1_id, nowait=False)
        successful_transfer(scope=scope, name=files[2]['name'], rse_id=self.rse1_id, nowait=False)
        # Also make replicas AVAILABLE
        session = get_session()
        replica = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=files[0]['name'], rse_id=self.rse1_id).one()
        replica.state = ReplicaState.AVAILABLE
        replica = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=files[1]['name'], rse_id=self.rse1_id).one()
        replica.state = ReplicaState.AVAILABLE
        replica = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=files[2]['name'], rse_id=self.rse1_id).one()
        replica.state = ReplicaState.AVAILABLE
        session.commit()

        rule_repairer(once=True)

        assert(RuleState.OK == get_rule(rule_id1)['state'])
        assert(RuleState.REPLICATING == get_rule(rule_id2)['state'])
Example #15
    def test_judge_expire_rule_with_child_rule(self):
        """ JUDGE CLEANER: Test the judge when deleting expired rules with child rules"""
        scope = InternalScope('mock', **self.vo)
        files = create_files(3, scope, self.rse1_id)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        rule_id = add_rule(dids=[{
            'scope': scope,
            'name': dataset
        }],
                           account=self.jdoe,
                           copies=1,
                           rse_expression=self.rse1,
                           grouping='NONE',
                           weight='fakeweight',
                           lifetime=None,
                           locked=False,
                           subscription_id=None)[0]
        child_rule = add_rule(dids=[{
            'scope': scope,
            'name': dataset
        }],
                              account=self.jdoe,
                              copies=1,
                              rse_expression=self.rse3,
                              grouping='NONE',
                              weight='fakeweight',
                              lifetime=-3,
                              locked=False,
                              subscription_id=None)[0]
        update_rule(rule_id, {'child_rule_id': child_rule})

        rule_cleaner(once=True)
Example #16
def test_fts_recoverable_failures_handled_on_multihop(
        vo, did_factory, root_account, replica_client, file_factory,
        core_config_mock, caches_mock):
    """
    Verify that the poller correctly handles recoverable FTS job failures
    """
    src_rse = 'XRD1'
    src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)
    jump_rse = 'XRD3'
    jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)
    dst_rse = 'XRD4'
    dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)

    all_rses = [src_rse_id, jump_rse_id, dst_rse_id]

    # Create and upload a real file, but register it with a wrong checksum. This will
    # trigger an FTS "Recoverable" failure on checksum validation
    local_file = file_factory.file_generator()
    did = did_factory.random_did()
    did_factory.upload_client.upload([{
        'path': local_file,
        'rse': src_rse,
        'did_scope': did['scope'].external,
        'did_name': did['name'],
        'no_register': True,
    }])
    replica_client.add_replicas(rse=src_rse,
                                files=[{
                                    'scope': did['scope'].external,
                                    'name': did['name'],
                                    'bytes': 1,
                                    'adler32': 'aaaaaaaa'
                                }])

    rule_core.add_rule(dids=[did],
                       account=root_account,
                       copies=1,
                       rse_expression=dst_rse,
                       grouping='ALL',
                       weight=None,
                       lifetime=None,
                       locked=False,
                       subscription_id=None)
    submitter(once=True,
              rses=[{
                  'id': rse_id
              } for rse_id in all_rses],
              group_bulk=2,
              partition_wait_time=None,
              transfertype='single',
              filter_transfertool=None)

    request = __wait_for_request_state(dst_rse_id=dst_rse_id,
                                       state=RequestState.FAILED,
                                       **did)
    assert request['state'] == RequestState.FAILED
    request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)
    assert request['state'] == RequestState.FAILED
Example #17
def test_multihop_receiver_on_success(vo, did_factory, root_account,
                                      core_config_mock, caches_mock):
    """
    Verify that the receiver correctly handles successful multihop jobs
    """
    receiver_thread = threading.Thread(target=receiver,
                                       kwargs={
                                           'id': 0,
                                           'full_mode': True,
                                           'all_vos': True,
                                           'total_threads': 1
                                       })
    receiver_thread.start()

    try:
        src_rse = 'XRD1'
        src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)
        jump_rse = 'XRD3'
        jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)
        dst_rse = 'XRD4'
        dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)

        all_rses = [src_rse_id, jump_rse_id, dst_rse_id]

        did = did_factory.upload_test_file(src_rse)
        rule_core.add_rule(dids=[did],
                           account=root_account,
                           copies=1,
                           rse_expression=dst_rse,
                           grouping='ALL',
                           weight=None,
                           lifetime=None,
                           locked=False,
                           subscription_id=None)
        submitter(once=True,
                  rses=[{
                      'id': rse_id
                  } for rse_id in all_rses],
                  group_bulk=2,
                  partition_wait_time=None,
                  transfertype='single',
                  filter_transfertool=None)

        request = __wait_for_request_state(dst_rse_id=jump_rse_id,
                                           state=RequestState.DONE,
                                           run_poller=False,
                                           **did)
        assert request['state'] == RequestState.DONE
        request = __wait_for_request_state(dst_rse_id=dst_rse_id,
                                           state=RequestState.DONE,
                                           run_poller=False,
                                           **did)
        assert request['state'] == RequestState.DONE
    finally:
        receiver_graceful_stop.set()
        receiver_thread.join(timeout=5)
        receiver_graceful_stop.clear()
Example #18
def test_multisource(vo, did_factory, root_account, replica_client,
                     core_config_mock, caches_mock):
    src_rse1 = 'XRD4'
    src_rse1_id = rse_core.get_rse_id(rse=src_rse1, vo=vo)
    src_rse2 = 'XRD1'
    src_rse2_id = rse_core.get_rse_id(rse=src_rse2, vo=vo)
    dst_rse = 'XRD3'
    dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)

    all_rses = [src_rse1_id, src_rse2_id, dst_rse_id]

    # Add a good replica on the RSE which has a higher distance ranking
    did = did_factory.upload_test_file(src_rse1)
    # Add non-existing replica which will fail during multisource transfers on the RSE with lower cost (will be the preferred source)
    replica_client.add_replicas(rse=src_rse2,
                                files=[{
                                    'scope': did['scope'].external,
                                    'name': did['name'],
                                    'bytes': 1,
                                    'adler32': 'aaaaaaaa'
                                }])

    rule_core.add_rule(dids=[did],
                       account=root_account,
                       copies=1,
                       rse_expression=dst_rse,
                       grouping='ALL',
                       weight=None,
                       lifetime=None,
                       locked=False,
                       subscription_id=None)
    submitter(once=True,
              rses=[{
                  'id': rse_id
              } for rse_id in all_rses],
              group_bulk=2,
              partition_wait_time=None,
              transfertype='single',
              filter_transfertool=None)

    @read_session
    def __source_exists(src_rse_id, scope, name, session=None):
        return session.query(models.Source) \
            .filter(models.Source.rse_id == src_rse_id) \
            .filter(models.Source.scope == scope) \
            .filter(models.Source.name == name) \
            .count() != 0

    # Entries in the source table must be created for both sources of the multi-source transfer
    assert __source_exists(src_rse_id=src_rse1_id, **did)
    assert __source_exists(src_rse_id=src_rse2_id, **did)

    replica = __wait_for_replica_transfer(dst_rse_id=dst_rse_id, **did)
    assert replica['state'] == ReplicaState.AVAILABLE
    assert not __source_exists(src_rse_id=src_rse1_id, **did)
    assert not __source_exists(src_rse_id=src_rse2_id, **did)
Example #19
def test_request_submitted_in_order(rse_factory, did_factory, root_account):

    src_rses = [rse_factory.make_posix_rse() for _ in range(2)]
    dst_rses = [rse_factory.make_posix_rse() for _ in range(3)]
    for _, src_rse_id in src_rses:
        for _, dst_rse_id in dst_rses:
            distance_core.add_distance(src_rse_id=src_rse_id, dest_rse_id=dst_rse_id, ranking=10)
            distance_core.add_distance(src_rse_id=dst_rse_id, dest_rse_id=src_rse_id, ranking=10)

    # Create a certain number of files on source RSEs with replication rules towards random destination RSEs
    nb_files = 15
    dids = []
    requests = []
    src_rses_iterator = itertools.cycle(src_rses)
    dst_rses_iterator = itertools.cycle(dst_rses)
    for _ in range(nb_files):
        src_rse_name, src_rse_id = next(src_rses_iterator)
        dst_rse_name, dst_rse_id = next(dst_rses_iterator)
        did = did_factory.upload_test_file(rse_name=src_rse_name)
        rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse_name, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)
        requests.append(request_core.get_request_by_did(rse_id=dst_rse_id, **did))
        dids.append(did)

    # Forge request creation time to a random moment in the past hour
    @transactional_session
    def _forge_requests_creation_time(session=None):
        base_time = datetime.utcnow().replace(microsecond=0, minute=0) - timedelta(hours=1)
        assigned_times = set()
        for request in requests:
            request_creation_time = None
            while not request_creation_time or request_creation_time in assigned_times:
                # Ensure uniqueness to avoid multiple valid submission orders and make tests deterministic with simple sorting techniques
                request_creation_time = base_time + timedelta(seconds=randint(0, 3600))
            assigned_times.add(request_creation_time)
            session.query(Request).filter(Request.id == request['id']).update({'created_at': request_creation_time})
            request['created_at'] = request_creation_time

    _forge_requests_creation_time()
    requests = sorted(requests, key=lambda r: r['created_at'])

    for request in requests:
        assert request_core.get_request(request_id=request['id'])['state'] == RequestState.QUEUED

    requests_id_in_submission_order = []
    with patch('rucio.transfertool.mock.MockTransfertool.submit') as mock_transfertool_submit:
        # Record the order of requests passed to MockTransfertool.submit()
        mock_transfertool_submit.side_effect = lambda jobs, _: requests_id_in_submission_order.extend([j['metadata']['request_id'] for j in jobs])

        submitter(once=True, rses=[{'id': rse_id} for _, rse_id in dst_rses], partition_wait_time=None, transfertool='mock', transfertype='single', filter_transfertool=None)

    for request in requests:
        assert request_core.get_request(request_id=request['id'])['state'] == RequestState.SUBMITTED

    # Requests must be submitted in the order of their creation
    assert requests_id_in_submission_order == [r['id'] for r in requests]
Example #20
    def test_add_rule_with_r2d2_container_treating_and_duplicate_rule(self):
        """ JUDGE INJECTOR (CORE): Add a replication rule with an r2d2 container treatment and duplicate rule"""
        scope = 'mock'
        container = 'asdf.r2d2_request.2016-04-01-15-00-00.ads.' + str(uuid())
        add_did(scope, container, DIDType.from_sym('CONTAINER'), 'jdoe')
        datasets = []
        for i in range(3):
            files = create_files(3, scope, self.rse1_id)
            dataset = 'dataset_' + str(uuid())
            datasets.append(dataset)
            add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
            attach_dids(scope, dataset, files, 'jdoe')
            attach_dids(scope, container, [{'scope': scope, 'name': dataset}], 'jdoe')
        # Pre-create a rule on the last dataset so the injected container rule hits a duplicate
        add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=900, locked=False, subscription_id=None, ask_approval=False)
        rule_id = add_rule(dids=[{'scope': scope, 'name': container}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=900, locked=False, subscription_id=None, ask_approval=True)[0]
        approve_rule(rule_id, approver='root')
        assert (get_rule(rule_id)['state'] == RuleState.INJECT)
        rule_injector(once=True)
        # Check if there is a rule for each file
        with assert_raises(RuleNotFound):
            get_rule(rule_id)
        for dataset in datasets:
            assert (len([r for r in list_rules({'scope': scope, 'name': dataset})]) > 0)
Example #21
def test_singlehop_vs_multihop_priority(rse_factory, root_account, mock_scope, core_config_mock, caches_mock):
    """
    On a small distance difference, the single hop is prioritized over the multihop
    due to HOP_PENALTY. On a big difference, the multihop is prioritized
    """
    # +------+    +------+
    # |      | 10 |      |
    # | RSE0 +--->| RSE1 |
    # |      |    |      +-+ 10
    # +------+    +------+ |  +------+       +------+
    #                      +->|      |  200  |      |
    # +------+                | RSE3 |<------| RSE4 |
    # |      |   30      +--->|      |       |      |
    # | RSE2 +-----------+    +------+       +------+
    # |      |
    # +------+
    _, rse0_id = rse_factory.make_posix_rse()
    _, rse1_id = rse_factory.make_posix_rse()
    _, rse2_id = rse_factory.make_posix_rse()
    rse3_name, rse3_id = rse_factory.make_posix_rse()
    _, rse4_id = rse_factory.make_posix_rse()

    add_distance(rse0_id, rse1_id, ranking=10)
    add_distance(rse1_id, rse3_id, ranking=10)
    add_distance(rse2_id, rse3_id, ranking=30)
    add_distance(rse4_id, rse3_id, ranking=200)
    rse_core.add_rse_attribute(rse1_id, 'available_for_multihop', True)

    # add same file to two source RSEs
    file = {'scope': mock_scope, 'name': 'lfn.' + generate_uuid(), 'type': 'FILE', 'bytes': 1, 'adler32': 'beefdead'}
    did = {'scope': file['scope'], 'name': file['name']}
    for rse_id in [rse0_id, rse2_id]:
        add_replicas(rse_id=rse_id, files=[file], account=root_account)

    rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=rse3_name, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)

    # The singlehop must be prioritized
    [[_, [transfer]]] = next_transfers_to_submit(rses=rse_factory.created_rses).items()
    assert len(transfer) == 1
    assert transfer[0].src.rse.id == rse2_id
    assert transfer[0].dst.rse.id == rse3_id

    # add same file to two source RSEs
    file = {'scope': mock_scope, 'name': 'lfn.' + generate_uuid(), 'type': 'FILE', 'bytes': 1, 'adler32': 'beefdead'}
    did = {'scope': file['scope'], 'name': file['name']}
    for rse_id in [rse0_id, rse4_id]:
        add_replicas(rse_id=rse_id, files=[file], account=root_account)

    rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=rse3_name, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)

    # The multihop must be prioritized
    [[_, transfers]] = next_transfers_to_submit(rses=rse_factory.created_rses).items()
    transfer = next(iter(t for t in transfers if t[0].rws.name == file['name']))
    assert len(transfer) == 2
Example #22
    def test_add_rule_file_none(self):
        """ REPLICATION RULE (CORE): Add a replication rule on a group of files, NONE Grouping"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        add_rule(dids=files, account='jdoe', copies=2, rse_expression=self.T1, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=None)

        # Check if the Locks are created properly
        t1 = set([self.rse1_id, self.rse3_id, self.rse5_id])
        for file in files:
            rse_locks = set([lock['rse_id'] for lock in get_replica_locks(scope=file['scope'], name=file['name'])])
            assert(len(t1.intersection(rse_locks)) > 0)
            assert_not_in(self.rse4_id, rse_locks)
Example #23
    def test_add_rule_duplicate(self):
        """ REPLICATION RULE (CORE): Add a replication rule duplicate"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        # Add a first rule to the DS
        add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=2, rse_expression=self.T1, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=None)

        # Add a second rule and check if the right locks are created
        assert_raises(DuplicateRule, add_rule, dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=2, rse_expression=self.T1, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=None)
Example #24
def test_fts_non_recoverable_failures_handled_on_multihop(
        vo, did_factory, root_account, replica_client, core_config_mock,
        caches_mock):
    """
    Verify that the poller correctly handles non-recoverable FTS job failures
    """
    src_rse = 'XRD1'
    src_rse_id = rse_core.get_rse_id(rse=src_rse, vo=vo)
    jump_rse = 'XRD3'
    jump_rse_id = rse_core.get_rse_id(rse=jump_rse, vo=vo)
    dst_rse = 'XRD4'
    dst_rse_id = rse_core.get_rse_id(rse=dst_rse, vo=vo)

    all_rses = [src_rse_id, jump_rse_id, dst_rse_id]

    # Register a did which doesn't exist. It will trigger a non-recoverable error during the FTS transfer.
    did = did_factory.random_did()
    replica_client.add_replicas(rse=src_rse,
                                files=[{
                                    'scope': did['scope'].external,
                                    'name': did['name'],
                                    'bytes': 1,
                                    'adler32': 'aaaaaaaa'
                                }])

    rule_core.add_rule(dids=[did],
                       account=root_account,
                       copies=1,
                       rse_expression=dst_rse,
                       grouping='ALL',
                       weight=None,
                       lifetime=None,
                       locked=False,
                       subscription_id=None)
    submitter(once=True,
              rses=[{
                  'id': rse_id
              } for rse_id in all_rses],
              group_bulk=2,
              partition_wait_time=None,
              transfertype='single',
              filter_transfertool=None)

    request = __wait_for_request_state(dst_rse_id=dst_rse_id,
                                       state=RequestState.FAILED,
                                       **did)
    assert request['state'] == RequestState.FAILED
    request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)
    assert request['state'] == RequestState.FAILED
Example #25
def add_sync_rule(rse, scope, block, account, session=None):
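    """
    Make every file in `block` an available replica on `rse` (registering any
    missing replicas), then pin the block there with a single-copy,
    DATASET-grouped 'Data Consolidation' rule.
    """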
    rse_id = get_rse_id(rse=rse, session=session)
    scope = InternalScope(scope)
    account = InternalAccount(account)

    files = []
    for file in list_files(scope, block, long=False, session=session):
        try:
            update_replicas_states(
                replicas=[
                    {
                        "scope": scope,
                        "name": file["name"],
                        "rse_id": rse_id,
                        "state": "A",
                    }
                ],
                add_tombstone=False,
                session=session,
            )
        except ReplicaNotFound:
            add_replicas(
                rse_id=rse_id,
                files=[
                    {
                        "scope": scope,
                        "name": file["name"],
                        "bytes": file["bytes"],
                        "adler32": file["adler32"],
                    }
                ],
                account=account,
                ignore_availability=True,
                session=session,
            )

    rules = add_rule(
        dids=[{"scope": scope, "name": block}],
        copies=1,
        rse_expression=rse,
        weight=None,
        lifetime=None,
        grouping="DATASET",
        account=account,
        locked=False,
        subscription_id=None,
        source_replica_expression=None,
        activity="Data Consolidation",
        notify=None,
        purge_replicas=False,
        ignore_availability=True,
        comment=None,
        ask_approval=False,
        asynchronous=False,
        priority=3,
        split_container=False,
        meta=json.dumps({"phedex_group": "DataOps", "phedex_custodial": True}),
        session=session,
    )
    return rules[0]
Example #26
    def test_add_rule_dataset_none_with_weights(self):
        """ REPLICATION RULE (CORE): Add a replication rule on a dataset, NONE Grouping, WEIGHTS"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=2, rse_expression=self.T1, grouping='NONE', weight="fakeweight", lifetime=None, locked=False, subscription_id=None)

        # Check if the Locks are created properly
        t1 = set([self.rse1_id, self.rse3_id, self.rse5_id])
        for file in files:
            rse_locks = set([lock['rse_id'] for lock in get_replica_locks(scope=file['scope'], name=file['name'])])
            assert(len(t1.intersection(rse_locks)) == 2)
            assert_in(self.rse1_id, rse_locks)
Example #27
    def test_judge_inject_delayed_rule(self):
        """ JUDGE INJECTOR: Test the judge when injecting a delayed rule"""
        scope = InternalScope('mock', **self.vo)
        files = create_files(1, scope, self.rse1_id)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)
        [file] = files

        # Add a delayed rule
        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=2, rse_expression=self.T1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None, delay_injection=3600)[0]

        rule = get_rule(rule_id)
        assert rule['state'] == RuleState.INJECT
        assert rule['updated_at'] < rule['created_at']
        assert datetime.utcnow() + timedelta(seconds=3550) < rule['created_at'] < datetime.utcnow() + timedelta(seconds=3650)

        # The time to create the rule has not yet arrived. The injector must skip this rule, no locks must be created
        rule_injector(once=True)
        assert get_rule(rule_id)['state'] == RuleState.INJECT
        assert not get_replica_locks(scope=file['scope'], name=file['name'])

        # simulate that time to inject the rule has arrived
        @transactional_session
        def __update_created_at(session=None):
            session.query(ReplicationRule).filter_by(id=rule_id).one().created_at = datetime.utcnow()
        __update_created_at()

        # The injector must create the locks now
        rule_injector(once=True)
        assert get_rule(rule_id)['state'] == RuleState.REPLICATING
        assert len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2
Example #28
    def test_add_rule_with_ignore_availability(self):
        """ REPLICATION RULE (CORE): Add a replication rule with ignore_availability setting"""
        rse = rse_name_generator()
        add_rse(rse)
        update_rse(rse, {'availability_write': False})

        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        with assert_raises(InvalidRSEExpression):
            add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=rse, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=None)[0]

        add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=rse, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=None, ignore_availability=True)[0]
Example #29
    def test_to_repair_a_rule_with_DATASET_grouping_whose_transfer_failed(self):
        """ JUDGE REPAIRER: Test to repair a rule with 1 failed transfer (lock)"""

        rule_repairer(once=True)  # Clean out the repairer
        scope = InternalScope('mock')
        files = create_files(4, scope, self.rse4_id, bytes=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        rule_id = add_rule(dids=[{
            'scope': scope,
            'name': dataset
        }],
                           account=self.jdoe,
                           copies=1,
                           rse_expression=self.T1,
                           grouping='DATASET',
                           weight=None,
                           lifetime=None,
                           locked=False,
                           subscription_id=None,
                           activity='DebugJudge')[0]

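        # DATASET grouping places all locks on a single RSE, hence files[2]'s lock RSE is used for every transfer below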
        successful_transfer(
            scope=scope,
            name=files[0]['name'],
            rse_id=get_replica_locks(scope=files[0]['scope'],
                                     name=files[2]['name'])[0].rse_id,
            nowait=False)
        successful_transfer(
            scope=scope,
            name=files[1]['name'],
            rse_id=get_replica_locks(scope=files[1]['scope'],
                                     name=files[2]['name'])[0].rse_id,
            nowait=False)
        failed_transfer(
            scope=scope,
            name=files[2]['name'],
            rse_id=get_replica_locks(scope=files[2]['scope'],
                                     name=files[2]['name'])[0].rse_id)
        failed_transfer(
            scope=scope,
            name=files[3]['name'],
            rse_id=get_replica_locks(scope=files[3]['scope'],
                                     name=files[3]['name'])[0].rse_id)

        assert (rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
        assert (RuleState.STUCK == get_rule(rule_id)['state'])
        rule_repairer(once=True)
        assert (RuleState.REPLICATING == get_rule(rule_id)['state'])
        assert (get_replica_locks(
            scope=files[2]['scope'],
            name=files[2]['name'])[0].rse_id == get_replica_locks(
                scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
        assert (get_replica_locks(
            scope=files[1]['scope'],
            name=files[1]['name'])[0].rse_id == get_replica_locks(
                scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
Example #30
    def test_to_repair_a_rule_with_only_1_rse_whose_site_is_blocklisted(self):
        """ JUDGE REPAIRER: Test to repair a rule with only 1 rse whose site is blocklisted"""

        rse = rse_name_generator()
        rse_id = add_rse(rse, **self.vo)
        update_rse(rse_id, {'availability_write': False})
        set_local_account_limit(self.jdoe, rse_id, -1)

        rule_repairer(once=True)  # Clean out the repairer
        scope = InternalScope('mock', **self.vo)
        files = create_files(4, scope, self.rse4_id, bytes=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=rse, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None, ignore_availability=True, activity='DebugJudge')[0]

        assert(RuleState.STUCK == get_rule(rule_id)['state'])
        rule_repairer(once=True)

        # Still assert STUCK because of ignore_availability:
        assert(RuleState.STUCK == get_rule(rule_id)['state'])

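        # Evict the cached RSE entry so the repairer re-reads the updated availability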
        region = make_region().configure('dogpile.cache.memcached',
                                         expiration_time=3600,
                                         arguments={'url': config_get('cache', 'url', False, '127.0.0.1:11211'), 'distributed_lock': True})
        region.delete(sha256(rse.encode()).hexdigest())

        update_rse(rse_id, {'availability_write': True})
        rule_repairer(once=True)
        assert(RuleState.REPLICATING == get_rule(rule_id)['state'])
Example #31
    def test_bb8_rebalance_rule(self):
        """ BB8: Test the rebalance rule method"""
        scope = InternalScope('mock', **self.vo)
        files = create_files(3, scope, self.rse1_id)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=self.rse1, grouping='NONE', weight='fakeweight', lifetime=None, locked=False, subscription_id=None)[0]

        rule = {}
        try:
            rule = get_rule(rule_id)
        except RuleNotFound:
            assert_raises(RuleNotFound, get_rule, rule_id)
        child_rule = rebalance_rule(rule, 'Rebalance', self.rse3, priority=3)

        rule_cleaner(once=True)

        assert(get_rule(rule_id)['expires_at'] <= datetime.utcnow())
        assert(get_rule(rule_id)['child_rule_id'] == child_rule)

        rule_cleaner(once=True)

        assert(get_rule(rule_id)['expires_at'] <= datetime.utcnow())

        successful_transfer(scope=scope, name=files[0]['name'], rse_id=self.rse3_id, nowait=False)
        successful_transfer(scope=scope, name=files[1]['name'], rse_id=self.rse3_id, nowait=False)
        with assert_raises(UnsupportedOperation):
            delete_rule(rule_id)
        successful_transfer(scope=scope, name=files[2]['name'], rse_id=self.rse3_id, nowait=False)

        rule_cleaner(once=True)
        assert(get_rule(child_rule)['state'] == RuleState.OK)
Example #32
    def test_to_repair_a_rule_with_only_1_rse_whose_transfers_failed(self):
        """ JUDGE REPAIRER: Test to repair a rule with only 1 rse whose transfers failed (lock)"""

        rule_repairer(once=True)  # Clean out the repairer
        scope = InternalScope('mock', **self.vo)
        files = create_files(4, scope, self.rse4_id, bytes=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]

        successful_transfer(scope=scope, name=files[0]['name'], rse_id=get_replica_locks(scope=files[0]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        successful_transfer(scope=scope, name=files[1]['name'], rse_id=get_replica_locks(scope=files[1]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        failed_transfer(scope=scope, name=files[2]['name'], rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)
        failed_transfer(scope=scope, name=files[3]['name'], rse_id=get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
        cancel_request_did(scope=scope, name=files[2]['name'], dest_rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)
        cancel_request_did(scope=scope, name=files[3]['name'], dest_rse_id=get_replica_locks(scope=files[3]['scope'], name=files[2]['name'])[0].rse_id)

        assert(rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
        assert(RuleState.STUCK == get_rule(rule_id)['state'])
        rule_repairer(once=True)

        # Still assert STUCK because of delays:
        assert(RuleState.STUCK == get_rule(rule_id)['state'])
        assert(get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id == get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
Example #33
    def test_to_repair_a_rule_with_NONE_grouping_whose_transfer_failed(self):
        """ JUDGE REPAIRER: Test to repair a rule with 1 failed transfer (lock)"""

        rule_repairer(once=True)  # Clean out the repairer
        scope = InternalScope('mock', **self.vo)
        files = create_files(3, scope, self.rse4_id, bytes=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=self.T1, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=None)[0]

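        # Find the RSE holding files[2]'s lock; its replica starts out COPYING with one lock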
        failed_rse_id = get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id
        assert(get_replica(scope=files[2]['scope'], name=files[2]['name'], rse_id=failed_rse_id)['state'] == ReplicaState.COPYING)
        assert(get_replica(scope=files[2]['scope'], name=files[2]['name'], rse_id=failed_rse_id)['lock_cnt'] == 1)

        successful_transfer(scope=scope, name=files[0]['name'], rse_id=get_replica_locks(scope=files[0]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        successful_transfer(scope=scope, name=files[1]['name'], rse_id=get_replica_locks(scope=files[1]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        failed_transfer(scope=scope, name=files[2]['name'], rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)

        assert(rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
        assert(RuleState.STUCK == get_rule(rule_id)['state'])
        rule_repairer(once=True)
        assert(RuleState.REPLICATING == get_rule(rule_id)['state'])
        assert(get_replica(scope=files[2]['scope'], name=files[2]['name'], rse_id=failed_rse_id)['state'] == ReplicaState.UNAVAILABLE)
        assert(get_replica(scope=files[2]['scope'], name=files[2]['name'], rse_id=failed_rse_id)['lock_cnt'] == 0)
Example #34
    def test_to_repair_a_rule_with_only_1_rse_whose_transfers_failed(self):
        """ JUDGE REPAIRER: Test to repair a rule with only 1 rse whose transfers failed (lock)"""

        rule_repairer(once=True)  # Clean out the repairer
        scope = 'mock'
        files = create_files(4, scope, self.rse4, bytes=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]

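        # copies=1 with DATASET grouping puts every lock on the same RSE, so the
        # lock of files[2] identifies the destination RSE for every file below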
        successful_transfer(scope=scope, name=files[0]['name'], rse_id=get_replica_locks(scope=files[0]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        successful_transfer(scope=scope, name=files[1]['name'], rse_id=get_replica_locks(scope=files[1]['scope'], name=files[2]['name'])[0].rse_id, nowait=False)
        failed_transfer(scope=scope, name=files[2]['name'], rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)
        failed_transfer(scope=scope, name=files[3]['name'], rse_id=get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
        cancel_request_did(scope=scope, name=files[2]['name'], dest_rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)
        cancel_request_did(scope=scope, name=files[3]['name'], dest_rse_id=get_replica_locks(scope=files[3]['scope'], name=files[2]['name'])[0].rse_id)

        assert(rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
        assert(RuleState.STUCK == get_rule(rule_id)['state'])
        rule_repairer(once=True)
        # Still STUCK because of the repairer's retry delays:
        assert(RuleState.STUCK == get_rule(rule_id)['state'])
        assert(get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id == get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
Example #35
    def test_judge_deny_rule(self):
        """ JUDGE INJECTOR: Test the judge when asking approval for a rule and denying it"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1_id)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        # Add a first rule to the DS
        rule_id = add_rule(dids=[{
            'scope': scope,
            'name': dataset
        }],
                           account='jdoe',
                           copies=1,
                           rse_expression=self.rse4,
                           grouping='DATASET',
                           weight=None,
                           lifetime=None,
                           locked=False,
                           subscription_id=None,
                           ask_approval=True)[0]

        assert (get_rule(rule_id)['state'] == RuleState.WAITING_APPROVAL)

        deny_rule(rule_id=rule_id, approver='root')

        assert_raises(RuleNotFound, get_rule, rule_id)
Example #36
    def test_judge_inject_rule(self):
        """ JUDGE INJECTOR: Test the judge when injecting a rule"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1_id)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        # Add a first rule to the DS
        rule_id = add_rule(dids=[{
            'scope': scope,
            'name': dataset
        }],
                           account='jdoe',
                           copies=2,
                           rse_expression=self.T1,
                           grouping='DATASET',
                           weight=None,
                           lifetime=None,
                           locked=False,
                           subscription_id=None,
                           asynchronous=True)[0]

        assert (get_rule(rule_id)['state'] == RuleState.INJECT)

        rule_injector(once=True)

        # Check if the Locks are created properly
        for file in files:
            assert (len(
                get_replica_locks(scope=file['scope'],
                                  name=file['name'])) == 2)
        assert (get_rule(rule_id)['state'] == RuleState.REPLICATING)
Example #37
    def test_s3s_fts_dst(self):
        """ S3: TPC a file from storage to S3 """

        expected_src_url = 'https://somestorage.ch:1094/my/prefix/mock/ab/01/file-on-storage?copy_mode=push'
        expected_dst_url = 's3s://fake-rucio.s3-eu-south-8.amazonaws.com:443/mock/ab/01/file-on-storage'

        rule_id = add_rule(dids=self.filenons3,
                           account=self.root,
                           copies=1,
                           rse_expression=self.rses3,
                           grouping='NONE',
                           weight=None,
                           lifetime=None,
                           locked=False,
                           subscription_id=None)

        requestss = get_transfer_requests_and_source_replicas(
            rses=[self.rses3])
        for requests in requestss:
            for request in requests:
                if requests[request]['rule_id'] == rule_id[0]:
                    assert requests[request]['sources'][0][
                        1] == expected_src_url
                    assert requests[request]['dest_urls'][
                        0] == expected_dst_url
Example #38
    def test_judge_add_files_to_dataset_with_2_rules(self):
        """ JUDGE EVALUATOR: Test the judge when adding files to dataset with 2 rules"""
        scope = InternalScope('mock', **self.vo)
        files = create_files(3, scope, self.rse1_id)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.DATASET, self.jdoe)

        # Add a first rule to the DS
        add_rule(dids=[{
            'scope': scope,
            'name': dataset
        }],
                 account=self.jdoe,
                 copies=1,
                 rse_expression=self.rse5,
                 grouping='DATASET',
                 weight=None,
                 lifetime=None,
                 locked=False,
                 subscription_id=None)
        add_rule(dids=[{
            'scope': scope,
            'name': dataset
        }],
                 account=self.root,
                 copies=1,
                 rse_expression=self.rse5,
                 grouping='DATASET',
                 weight=None,
                 lifetime=None,
                 locked=False,
                 subscription_id=None)

        attach_dids(scope, dataset, files, self.jdoe)
        re_evaluator(once=True)

        files = create_files(3, scope, self.rse1_id)
        attach_dids(scope, dataset, files, self.jdoe)

        # Fake judge
        re_evaluator(once=True)

        # Check if the Locks are created properly
        for file in files:
            assert (len(
                get_replica_locks(scope=file['scope'],
                                  name=file['name'])) == 2)
Example #39
def test_list_rules_states(vo, rest_client, auth_token):
    """ SUBSCRIPTION (REST): Test listing of rule states for subscription """
    tmp_scope = InternalScope('mock_' + uuid()[:8], vo=vo)
    root = InternalAccount('root', vo=vo)
    add_scope(tmp_scope, root)
    site_a = 'RSE%s' % uuid().upper()
    site_b = 'RSE%s' % uuid().upper()

    site_a_id = add_rse(site_a, vo=vo)
    site_b_id = add_rse(site_b, vo=vo)

    # Add quota
    set_local_account_limit(root, site_a_id, -1)
    set_local_account_limit(root, site_b_id, -1)

    # add a new dataset
    dsn = 'dataset-%s' % uuid()
    add_did(scope=tmp_scope, name=dsn,
            did_type=DIDType.DATASET, account=root)

    subscription_name = uuid()
    subid = add_subscription(name=subscription_name,
                             account='root',
                             filter_={'account': ['root', ], 'scope': [tmp_scope.external, ]},
                             replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}],
                             lifetime=100000,
                             retroactive=0,
                             dry_run=0,
                             comments='We want a shrubbery',
                             issuer='root',
                             vo=vo)

    # Add two rules
    add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account=root, copies=1, rse_expression=site_a, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=subid)
    add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account=root, copies=1, rse_expression=site_b, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=subid)

    response = rest_client.get('/subscriptions/%s/%s/Rules/States' % ('root', subscription_name), headers=headers(auth(auth_token)))
    assert response.status_code == 200

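    # The endpoint streams newline-delimited JSON rows; judging from the asserts
    # below, index 1 carries the subscription name and index 3 a per-state rule count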
    rulestates = None
    for line in response.get_data(as_text=True).split('\n'):
        if line:
            rulestates = loads(line)
            if rulestates[1] == subscription_name:
                break
    assert rulestates is not None
    assert rulestates[3] == 2
Example #40
    def test_list_rules_states(self):
        """ SUBSCRIPTION (REST): Test listing of rule states for subscription """
        tmp_scope = 'mock_' + uuid()[:8]
        add_scope(tmp_scope, 'root')
        mw = []
        site_a = 'RSE%s' % uuid().upper()
        site_b = 'RSE%s' % uuid().upper()

        add_rse(site_a)
        add_rse(site_b)

        # add a new dataset
        dsn = 'dataset-%s' % uuid()
        add_did(scope=tmp_scope, name=dsn,
                type=DIDType.DATASET, account='root')

        subscription_name = uuid()
        id = add_subscription(name=subscription_name, account='root', filter={'account': 'root'},
                              replication_rules=[(1, 'T1_DATADISK', False, True)], lifetime=100000, retroactive=0, dry_run=0, comments='We want a shrubbery')

        subscriptions = list_subscriptions(name=subscription_name, account='root')

        # workaround until add_subscription returns the id
        id = None
        for s in subscriptions:
            id = s['id']

        # Add two rules
        add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account='root', copies=1, rse_expression=site_a, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=id)
        add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account='root', copies=1, rse_expression=site_b, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=id)

        headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': '******', 'X-Rucio-Password': '******'}
        r1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)

        assert_equal(r1.status, 200)
        token = str(r1.header('X-Rucio-Auth-Token'))

        headers2 = {'X-Rucio-Auth-Token': str(token)}
        r2 = TestApp(subs_app.wsgifunc(*mw)).get('/%s/%s/Rules/States' % ('root', subscription_name), headers=headers2, expect_errors=True)

        for line in r2.body.split('\n'):
            print(line)
            rs = loads(line)
            if rs[1] == subscription_name:
                break
        assert_equal(rs[3], 2)
Example #41
    def test_list_rules_by_did(self):
        """ DID (CLIENT): List Replication Rules per DID """
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        rule_id_1 = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='NONE', weight='fakeweight', lifetime=None, locked=False, subscription_id=None)[0]

        rule_id_2 = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse3, grouping='NONE', weight='fakeweight', lifetime=None, locked=False, subscription_id=None)[0]

        ret = self.did_client.list_did_rules(scope=scope, name=dataset)
        ids = [rule['id'] for rule in ret]

        assert_in(rule_id_1, ids)
        assert_in(rule_id_2, ids)
Example #42
File: test_bb8.py Project: rak108/rucio
def test_bb8_rebalance_rule(vo, root_account, jdoe_account, rse_factory, mock_scope, did_factory):
    """BB8: Test the rebalance rule method"""
    rse1, rse1_id = rse_factory.make_posix_rse()
    rse2, rse2_id = rse_factory.make_posix_rse()

    # Add Tags
    T1 = tag_generator()
    T2 = tag_generator()
    add_rse_attribute(rse1_id, T1, True)
    add_rse_attribute(rse2_id, T2, True)

    # Add fake weights
    add_rse_attribute(rse1_id, "fakeweight", 10)
    add_rse_attribute(rse2_id, "fakeweight", 0)

    # Add quota
    set_local_account_limit(jdoe_account, rse1_id, -1)
    set_local_account_limit(jdoe_account, rse2_id, -1)

    set_local_account_limit(root_account, rse1_id, -1)
    set_local_account_limit(root_account, rse2_id, -1)

    files = create_files(3, mock_scope, rse1_id)
    dataset = did_factory.make_dataset()
    attach_dids(mock_scope, dataset['name'], files, jdoe_account)
    set_status(mock_scope, dataset['name'], open=False)

    # Invalidate the cache because the result of parse_expression is cached
    REGION.invalidate()

    rule_id = add_rule(dids=[{'scope': mock_scope, 'name': dataset['name']}], account=jdoe_account, copies=1, rse_expression=rse1, grouping='NONE', weight='fakeweight', lifetime=None, locked=False, subscription_id=None)[0]
    rule = {}
    try:
        rule = get_rule(rule_id)
    except:
        pytest.raises(RuleNotFound, get_rule, rule_id)
    child_rule = rebalance_rule(rule, 'Rebalance', rse2, priority=3)

    rule_cleaner(once=True)

    assert(get_rule(rule_id)['expires_at'] <= datetime.utcnow())
    assert(get_rule(rule_id)['child_rule_id'] == child_rule)

    rule_cleaner(once=True)

    assert(get_rule(rule_id)['expires_at'] <= datetime.utcnow())

    successful_transfer(scope=mock_scope, name=files[0]['name'], rse_id=rse2_id, nowait=False)
    successful_transfer(scope=mock_scope, name=files[1]['name'], rse_id=rse2_id, nowait=False)
    with pytest.raises(UnsupportedOperation):
        delete_rule(rule_id)
    successful_transfer(scope=mock_scope, name=files[2]['name'], rse_id=rse2_id, nowait=False)

    rule_cleaner(once=True)
    assert(get_rule(child_rule)['state'] == RuleState.OK)
    set_metadata(mock_scope, dataset['name'], 'lifetime', -86400)
    undertaker.run(once=True)
Example #43
    def test_rse_counter_unavailable_replicas(self):
        """ REPLICATION RULE (CORE): Test if creating UNAVAILABLE replicas updates the RSE Counter correctly"""

        rse_update(once=True)
        rse_counter_before = get_rse_counter(self.rse3_id)

        scope = 'mock'
        files = create_files(3, scope, self.rse1, bytes=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse3, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)

        # Check if the rse has been updated correctly
        rse_update(once=True)
        rse_counter_after = get_rse_counter(self.rse3_id)
        assert(rse_counter_before['bytes'] + 3*100 == rse_counter_after['bytes'])
        assert(rse_counter_before['files'] + 3 == rse_counter_after['files'])
Example #44
    def test_add_rule_container_none(self):
        """ REPLICATION RULE (CORE): Add a replication rule on a container, NONE Grouping"""
        scope = 'mock'
        container = 'container_' + str(uuid())
        add_did(scope, container, DIDType.from_sym('CONTAINER'), 'jdoe')
        all_files = []
        for i in xrange(3):
            files = create_files(3, scope, self.rse1)
            all_files.extend(files)
            dataset = 'dataset_' + str(uuid())
            add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
            attach_dids(scope, dataset, files, 'jdoe')
            attach_dids(scope, container, [{'scope': scope, 'name': dataset}], 'jdoe')

        add_rule(dids=[{'scope': scope, 'name': container}], account='jdoe', copies=1, rse_expression=self.T2, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=None)
        for file in all_files:
            rse_locks = set([lock['rse_id'] for lock in get_replica_locks(scope=file['scope'], name=file['name'])])
            assert_in(self.rse4_id, rse_locks)
            assert_not_in(self.rse5_id, rse_locks)
Example #45
    def test_delete_rule_and_cancel_transfers(self):
        """ REPLICATION RULE (CORE): Test to delete a previously created rule and do not cancel overlapping transfers"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        rule_id_1 = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='NONE', weight='fakeweight', lifetime=None, locked=False, subscription_id=None)[0]
        add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=2, rse_expression=self.T1, grouping='NONE', weight='fakeweight', lifetime=None, locked=False, subscription_id=None)[0]
        add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=3, rse_expression=self.T1, grouping='NONE', weight='fakeweight', lifetime=None, locked=False, subscription_id=None)[0]

        delete_rule(rule_id_1)

        for file in files:
            rse_locks = get_replica_locks(scope=file['scope'], name=file['name'])
            assert(len(rse_locks) == 5)
            # TODO Need to check transfer queue here, this is actually not the check of this test case
        assert_raises(RuleNotFound, delete_rule, uuid())
Example #46
    def test_account_counter_rule_create(self):
        """ REPLICATION RULE (CORE): Test if the account counter is updated correctly when new rule is created"""

        account_update(once=True)
        account_counter_before = get_account_counter(self.rse1_id, 'jdoe')

        scope = 'mock'
        files = create_files(3, scope, self.rse1, bytes=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='ALL', weight=None, lifetime=None, locked=False, subscription_id=None)

        # Check if the counter has been updated correctly
        account_update(once=True)
        account_counter_after = get_account_counter(self.rse1_id, 'jdoe')
        assert(account_counter_before['bytes'] + 3*100 == account_counter_after['bytes'])
        assert(account_counter_before['files'] + 3 == account_counter_after['files'])
Example #47
    def test_get_rule(self):
        """ REPLICATION RULE (CORE): Test to get a previously created rule"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=2, rse_expression=self.T1, grouping='NONE', weight='fakeweight', lifetime=None, locked=False, subscription_id=None)[0]
        assert(rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
        assert_raises(RuleNotFound, get_rule, uuid())
Example #48
    def test_dataset_lock(self):
        """ DATASETLOCK (CLIENT): Get a datasetlock for a specific dataset"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        rule_id_1 = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='DATASET', weight='fakeweight', lifetime=None, locked=True, subscription_id=None)[0]

        rule_ids = [lock['rule_id'] for lock in self.lock_client.get_dataset_locks(scope=scope, name=dataset)]
        assert_in(rule_id_1, rule_ids)
Example #49
    def test_delete_rule(self):
        """ REPLICATION RULE (CLIENT): Delete a replication rule """
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='NONE', weight='fakeweight', lifetime=None, locked=False, subscription_id=None)[0]

        ret = self.rule_client.delete_replication_rule(rule_id=rule_id)
        assert(ret is True)
        assert_raises(RuleNotFound, self.rule_client.delete_replication_rule, rule_id)
Example #50
    def test_judge_add_files_to_dataset(self):
        """ JUDGE EVALUATOR: Test the judge when adding files to dataset"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')

        # Add a first rule to the DS
        add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=2, rse_expression=self.T1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)

        attach_dids(scope, dataset, files, 'jdoe')
        re_evaluator(once=True)

        files = create_files(3, scope, self.rse1)
        attach_dids(scope, dataset, files, 'jdoe')

        # Fake judge
        re_evaluator(once=True)

        # Check if the Locks are created properly
        for file in files:
            assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)
Example #51
    def test_locked_rule(self):
        """ REPLICATION RULE (CLIENT): Delete a locked replication rule"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        rule_id_1 = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='NONE', weight='fakeweight', lifetime=None, locked=True, subscription_id=None)[0]

        assert_raises(AccessDenied, delete_rule, rule_id_1)
        self.rule_client.update_replication_rule(rule_id=rule_id_1, options={'locked': False})
        delete_rule(rule_id=rule_id_1)
Example #52
    def test_delete_rule(self):
        """ REPLICATION RULE (CORE): Test to delete a previously created rule"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=2, rse_expression=self.T1, grouping='DATASET', weight='fakeweight', lifetime=None, locked=False, subscription_id=None)[0]
        delete_rule(rule_id)
        for file in files:
            rse_locks = get_replica_locks(scope=file['scope'], name=file['name'])
            assert(len(rse_locks) == 0)
        assert_raises(RuleNotFound, delete_rule, uuid())
Example #53
File: rule.py Project: pombredanne/rucio
def add_replication_rule(dids, copies, rse_expression, weight, lifetime, grouping, account, locked, subscription_id, source_replica_expression, activity, notify, purge_replicas, ignore_availability, issuer):
    """
    Adds a replication rule.

    :param dids:                       The data identifier set.
    :param copies:                     The number of replicas.
    :param rse_expression:             Boolean string expression to give the list of RSEs.
    :param weight:                     If the weighting option of the replication rule is used, the choice of RSEs takes their weight into account.
    :param lifetime:                   The lifetime of the replication rules (in seconds).
    :param grouping:                   ALL -  All files will be replicated to the same RSE.
                                       DATASET - All files in the same dataset will be replicated to the same RSE.
                                       NONE - Files will be completely spread over all allowed RSEs without any grouping considerations at all.
    :param account:                    The account owning the rule.
    :param locked:                     If the rule is locked, it cannot be deleted.
    :param subscription_id:            The subscription_id, if the rule is created by a subscription.
    :param source_replica_expression:  Only use replicas from this RSE as sources.
    :param activity:                   Activity to be passed on to the conveyor.
    :param notify:                     Notification setting of the rule.
    :param purge_replicas:            The purge setting to delete replicas immediately after rule deletion.
    :param ignore_availability:        Option to ignore the availability of RSEs.
    :param issuer:                     The issuing account of this operation.
    :returns:                          List of created replication rules.
    """
    if account is None:
        account = issuer

    kwargs = {'dids': dids, 'copies': copies, 'rse_expression': rse_expression, 'weight': weight, 'lifetime': lifetime,
              'grouping': grouping, 'account': account, 'locked': locked, 'subscription_id': subscription_id,
              'source_replica_expression': source_replica_expression, 'notify': notify, 'activity': activity,
              'purge_replicas': purge_replicas, 'ignore_availability': ignore_availability}

    validate_schema(name='activity', obj=kwargs['activity'])

    if not has_permission(issuer=issuer, action='add_rule', kwargs=kwargs):
        raise AccessDenied('Account %s cannot add replication rule' % (issuer))
    return rule.add_rule(account=account,
                         dids=dids,
                         copies=copies,
                         rse_expression=rse_expression,
                         grouping=grouping,
                         weight=weight,
                         lifetime=lifetime,
                         locked=locked,
                         subscription_id=subscription_id,
                         source_replica_expression=source_replica_expression,
                         activity=activity,
                         notify=notify,
                         purge_replicas=purge_replicas,
                         ignore_availability=ignore_availability)
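
A hedged usage sketch of the gateway function above; the DID, RSE expression, accounts and activity below are hypothetical placeholders, and only the keyword signature shown above is assumed:

# Hypothetical call; every literal is a placeholder, not a value from the source.
rule_ids = add_replication_rule(dids=[{'scope': 'mock', 'name': 'dataset_abc'}],
                                copies=2,
                                rse_expression='tier=1',
                                weight=None,
                                lifetime=86400,
                                grouping='DATASET',
                                account='jdoe',
                                locked=False,
                                subscription_id=None,
                                source_replica_expression=None,
                                activity='User Subscriptions',
                                notify=None,
                                purge_replicas=False,
                                ignore_availability=False,
                                issuer='root')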
Example #54
    def test_add_rule_dataset_dataset(self):
        """ REPLICATION RULE (CORE): Add a replication rule on a dataset, DATASET Grouping"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=2, rse_expression=self.T1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)

        # Check if the Locks are created properly
        t1 = set([self.rse1_id, self.rse3_id, self.rse5_id])
        first_locks = None
        for file in files:
            if first_locks is None:
                first_locks = set([lock['rse_id'] for lock in get_replica_locks(scope=file['scope'], name=file['name'])])
            rse_locks = set([lock['rse_id'] for lock in get_replica_locks(scope=file['scope'], name=file['name'])])
            assert(len(t1.intersection(rse_locks)) == 2)
            assert(len(first_locks.intersection(rse_locks)) == 2)

        # Check if the DatasetLocks are created properly
        dataset_locks = [lock for lock in get_dataset_locks(scope=scope, name=dataset)]
        assert(len(t1.intersection(set([lock['rse_id'] for lock in dataset_locks]))) == 2)
        assert(len(first_locks.intersection(set([lock['rse_id'] for lock in dataset_locks]))) == 2)
Example #55
    def test_add_rule_with_purge(self):
        """ REPLICATION RULE (CORE): Add a replication rule with purge setting"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse4, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=None, purge_replicas=True)[0]

        delete_rule(rule_id)

        # Check if the Locks are created properly
        for file in files:
            replica = get_replica(rse=self.rse4, scope=file['scope'], name=file['name'])
            assert(replica['tombstone'] == OBSOLETE)
Example #56
    def test_change_rule_lifetime(self):
        """ REPLICATION RULE (CLIENT): Change rule lifetime"""
        scope = 'mock'
        files = create_files(3, scope, self.rse1)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        rule_id_1 = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='DATASET', weight='fakeweight', lifetime=150, locked=True, subscription_id=None)[0]

        get = self.rule_client.get_replication_rule(rule_id_1)

        self.rule_client.update_replication_rule(rule_id_1, options={'lifetime': 10000})

        get2 = self.rule_client.get_replication_rule(rule_id_1)

        assert(get['expires_at'] != get2['expires_at'])
Example #57
    def test_dataset_callback_no(self):
        """ REPLICATION RULE (CORE): Test dataset callback should not be sent"""

        scope = 'mock'
        files = create_files(3, scope, self.rse1, bytes=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')

        set_status(scope=scope, name=dataset, open=False)

        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse3, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None, notify='C')[0]

        successful_transfer(scope=scope, name=files[0]['name'], rse_id=self.rse3_id, nowait=False)
        successful_transfer(scope=scope, name=files[1]['name'], rse_id=self.rse3_id, nowait=False)

        # Check if rule exists
        assert(False == check_dataset_ok_callback(scope, dataset, self.rse3, rule_id))
Example #58
    def test_repair_a_rule_with_missing_locks(self):
        """ JUDGE EVALUATOR: Test the judge when a rule gets STUCK from re_evaluating and there are missing locks"""
        scope = 'mock'
        files = create_files(3, scope, self.rse4)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')

        # Add a first rule to the DS
        rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=2, rse_expression=self.T1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]

        attach_dids(scope, dataset, files, 'jdoe')

        # Fake judge
        re_evaluator(once=True)

        # Check if the Locks are created properly
        for file in files:
            assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)

        # Add more files to the DID
        files2 = create_files(3, scope, self.rse4)
        attach_dids(scope, dataset, files2, 'jdoe')

        # Mark the rule STUCK to fake that the re-evaluation failed
        session = get_session()
        rule = session.query(models.ReplicationRule).filter_by(id=rule_id).one()
        rule.state = RuleState.STUCK
        session.commit()

        rule_repairer(once=True)

        for file in files:
            assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)
        for file in files2:
            assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)
            assert(len(set([lock.rse_id for lock in get_replica_locks(scope=files[0]['scope'], name=files[0]['name'])]).intersection(set([lock.rse_id for lock in get_replica_locks(scope=file['scope'], name=file['name'])]))) == 2)
        assert(12 == get_rule(rule_id)['locks_replicating_cnt'])
Example #59
def request_transfer(once=False, src=None, dst=None):
    """
    Main loop to request a new transfer.
    """

    logging.info('request: starting')

    site_a = 'RSE%s' % generate_uuid().upper()
    site_b = 'RSE%s' % generate_uuid().upper()

    scheme = 'https'
    impl = 'rucio.rse.protocols.webdav.Default'
    if not src.startswith('https://'):
        scheme = 'srm'
        impl = 'rucio.rse.protocols.srm.Default'
        srctoken = src.split(':')[0]
        dsttoken = dst.split(':')[0]

    tmp_proto = {
        'impl': impl,
        'scheme': scheme,
        'domains': {
            'lan': {'read': 1, 'write': 1, 'delete': 1},
            'wan': {'read': 1, 'write': 1, 'delete': 1}}}

    rse.add_rse(site_a)
    tmp_proto['hostname'] = src.split(':')[1][2:]
    tmp_proto['port'] = src.split(':')[2].split('/')[0]
    tmp_proto['prefix'] = '/'.join([''] + src.split(':')[2].split('/')[1:])
    if scheme == 'srm':
        tmp_proto['extended_attributes'] = {'space_token': srctoken,
                                            'web_service_path': ''}
    rse.add_protocol(site_a, tmp_proto)

    tmp_proto = {
        'impl': impl,
        'scheme': scheme,
        'domains': {
            'lan': {'read': 1, 'write': 1, 'delete': 1},
            'wan': {'read': 1, 'write': 1, 'delete': 1}}}

    rse.add_rse(site_b)
    tmp_proto['hostname'] = dst.split(':')[1][2:]
    tmp_proto['port'] = dst.split(':')[2].split('/')[0]
    tmp_proto['prefix'] = '/'.join([''] + dst.split(':')[2].split('/')[1:])
    if scheme == 'srm':
        tmp_proto['extended_attributes'] = {'space_token': dsttoken,
                                            'web_service_path': ''}
    rse.add_protocol(site_b, tmp_proto)

    si = rsemanager.get_rse_info(site_a)

    session = get_session()

    logging.info('request: started')

    while not graceful_stop.is_set():

        try:

            ts = time.time()

            tmp_name = generate_uuid()

            # add a new dataset
            did.add_did(scope='mock', name='dataset-%s' % tmp_name,
                        type=DIDType.DATASET, account='root', session=session)

            # construct PFN
            pfn = rsemanager.lfns2pfns(si, lfns=[{'scope': 'mock', 'name': 'file-%s' % tmp_name}])['mock:file-%s' % tmp_name]

            # create the directories if needed
            p = rsemanager.create_protocol(si, operation='write', scheme=scheme)
            p.connect()
            try:
                p.mkdir(pfn)
            except:
                pass

            # upload the test file
            try:
                fp = os.path.dirname(config_get('injector', 'file'))
                fn = os.path.basename(config_get('injector', 'file'))
                p.put(fn, pfn, source_dir=fp)
            except:
                logging.critical('Could not upload, removing temporary DID: %s' % str(sys.exc_info()))
                did.delete_dids([{'scope': 'mock', 'name': 'dataset-%s' % tmp_name}], account='root', session=session)
                break

            # add the replica
            replica.add_replica(rse=site_a, scope='mock', name='file-%s' % tmp_name,
                                bytes=config_get_int('injector', 'bytes'),
                                adler32=config_get('injector', 'adler32'),
                                md5=config_get('injector', 'md5'),
                                account='root', session=session)

            # to the dataset
            did.attach_dids(scope='mock', name='dataset-%s' % tmp_name, dids=[{'scope': 'mock',
                                                                               'name': 'file-%s' % tmp_name,
                                                                               'bytes': config_get_int('injector', 'bytes')}],
                            account='root', session=session)

            # add rule for the dataset
            ts = time.time()

            rule.add_rule(dids=[{'scope': 'mock', 'name': 'dataset-%s' % tmp_name}],
                          account='root',
                          copies=1,
                          rse_expression=site_b,
                          grouping='ALL',
                          weight=None,
                          lifetime=None,
                          locked=False,
                          subscription_id=None,
                          activity='mock-injector',
                          session=session)

            logging.info('added rule for %s for DID mock:%s' % (site_b, tmp_name))
            record_timer('daemons.mock.conveyorinjector.add_rule', (time.time()-ts)*1000)

            record_counter('daemons.mock.conveyorinjector.request_transfer')

            session.commit()
        except:
            session.rollback()
            logging.critical(traceback.format_exc())

        if once:
            return

    logging.info('request: graceful stop requested')

    logging.info('request: graceful stop done')
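
The protocol registration above derives hostname, port and prefix purely from positional split(':') indexing; a minimal standalone sketch (hypothetical endpoint, not from the source) shows how the pieces fall out:

# Hypothetical endpoint; mirrors the split indexing used in request_transfer above.
src = 'https://storage.example.org:443/my/prefix'
parts = src.split(':')                              # ['https', '//storage.example.org', '443/my/prefix']
hostname = parts[1][2:]                             # 'storage.example.org' (strip the leading '//')
port = parts[2].split('/')[0]                       # '443'
prefix = '/'.join([''] + parts[2].split('/')[1:])   # '/my/prefix'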
Example #60
def transmogrifier(worker_number=1, total_workers=1, chunk_size=5, once=False):
    """
    Creates a Transmogrifier Worker that gets a list of new DIDs for a given hash, identifies the subscriptions matching the DIDs and submits a replication rule for each DID matching a subscription.

    :param worker_number: The number of the worker (thread).
    :param total_workers: The total number of workers (threads).
    :param chunk_size: The chunk size to process.
    :param once: Run only once.
    """
    while not graceful_stop.is_set():
        dids, subscriptions = [], []
        tottime = 0
        try:
            for did in list_new_dids(worker_number=worker_number, total_workers=total_workers, chunk_size=chunk_size):
                d = {"scope": did["scope"], "did_type": str(did["did_type"]), "name": did["name"]}
                dids.append(d)
            for sub in list_subscriptions(None, None):
                if sub["state"] in [SubscriptionState.ACTIVE, SubscriptionState.UPDATED]:
                    subscriptions.append(sub)
        except:
            logging.error("Thread %i : Failed to get list of new DIDs or subsscriptions" % (worker_number))
            if once:
                break
            else:
                continue

        try:
            results = {}
            start_time = time.time()
            logging.debug("Thread %i : In transmogrifier worker" % (worker_number))
            identifiers = []
            for did in dids:
                if did["did_type"] == str(DIDType.DATASET) or did["did_type"] == str(DIDType.CONTAINER):
                    results["%s:%s" % (did["scope"], did["name"])] = []
                    try:
                        metadata = get_metadata(did["scope"], did["name"])
                        for subscription in subscriptions:
                            if is_matching_subscription(subscription, did, metadata) is True:
                                stime = time.time()
                                results["%s:%s" % (did["scope"], did["name"])].append(subscription["id"])
                                logging.info(
                                    "Thread %i : %s:%s matches subscription %s"
                                    % (worker_number, did["scope"], did["name"], subscription["name"])
                                )
                                for rule in loads(subscription["replication_rules"]):
                                    grouping = rule.get("grouping", "DATASET")
                                    lifetime = rule.get("lifetime", None)
                                    if lifetime:
                                        lifetime = int(lifetime)
                                    weight = rule.get("weight", None)
                                    source_replica_expression = rule.get("source_replica_expression", None)
                                    activity = rule.get("activity", None)
                                    try:
                                        rse_expression = str(rule["rse_expression"]).encode("string-escape")
                                        add_rule(
                                            dids=[{"scope": did["scope"], "name": did["name"]}],
                                            account=subscription["account"],
                                            copies=int(rule["copies"]),
                                            rse_expression=rse_expression,
                                            grouping=grouping,
                                            weight=weight,
                                            lifetime=lifetime,
                                            locked=False,
                                            subscription_id=subscription["id"],
                                            source_replica_expression=source_replica_expression,
                                            activity=activity,
                                        )
                                        monitor.record_counter(counters="transmogrifier.addnewrule.done", delta=1)
                                        if subscription["name"].find("test") > -1:
                                            monitor.record_counter(
                                                counters="transmogrifier.addnewrule.activity.test", delta=1
                                            )
                                        elif subscription["name"].startswith("group"):
                                            monitor.record_counter(
                                                counters="transmogrifier.addnewrule.activity.group", delta=1
                                            )
                                        elif subscription["name"].startswith("tier0export"):
                                            monitor.record_counter(
                                                counters="transmogrifier.addnewrule.activity.tier0export", delta=1
                                            )
                                        elif subscription["name"].endswith("export"):
                                            monitor.record_counter(
                                                counters="transmogrifier.addnewrule.activity.dataconsolidation", delta=1
                                            )
                                        else:
                                            monitor.record_counter(
                                                counters="transmogrifier.addnewrule.activity.other", delta=1
                                            )
                                    except InvalidReplicationRule as e:
                                        logging.error("Thread %i : %s" % (worker_number, str(e)))
                                        monitor.record_counter(
                                            counters="transmogrifier.addnewrule.errortype.InvalidReplicationRule",
                                            delta=1,
                                        )
                                    except InvalidRuleWeight as e:
                                        logging.error("Thread %i : %s" % (worker_number, str(e)))
                                        monitor.record_counter(
                                            counters="transmogrifier.addnewrule.errortype.InvalidRuleWeight", delta=1
                                        )
                                    except InvalidRSEExpression as e:
                                        logging.error("Thread %i : %s" % (worker_number, str(e)))
                                        monitor.record_counter(
                                            counters="transmogrifier.addnewrule.errortype.InvalidRSEExpression", delta=1
                                        )
                                    except StagingAreaRuleRequiresLifetime as e:
                                        logging.error("Thread %i : %s" % (worker_number, str(e)))
                                        monitor.record_counter(
                                            counters="transmogrifier.addnewrule.errortype.StagingAreaRuleRequiresLifetime",
                                            delta=1,
                                        )
                                    except ReplicationRuleCreationTemporaryFailed as e:
                                        # Should never occur; just for completeness
                                        logging.error("Thread %i : %s" % (worker_number, str(e)))
                                        monitor.record_counter(
                                            counters="transmogrifier.addnewrule.errortype.ReplicationRuleCreationTemporaryFailed",
                                            delta=1,
                                        )