def test_repair_a_rule_with_source_replica_expression(self):
    """ JUDGE REPAIRER: Test repairing two rules where the second one has a source_replica_expression"""
    scope = InternalScope('mock', **self.vo)
    files = create_files(3, scope, self.rse4_id)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.DATASET, self.jdoe)
    attach_dids(scope, dataset, files, self.jdoe)

    # First rule: plain replication of the dataset to rse1.
    rule_id1 = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1,
                        rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=None,
                        locked=False, subscription_id=None)[0]
    # Second rule: replication to rse3, but sources restricted to rse1.
    rule_id2 = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1,
                        rse_expression=self.rse3, grouping='DATASET', weight=None, lifetime=None,
                        locked=False, subscription_id=None, source_replica_expression=self.rse1)[0]

    # No replicas exist on rse1 yet, so the source-restricted rule is STUCK.
    assert(RuleState.REPLICATING == get_rule(rule_id1)['state'])
    assert(RuleState.STUCK == get_rule(rule_id2)['state'])

    # Complete the transfers feeding the first rule.
    for a_file in files:
        successful_transfer(scope=scope, name=a_file['name'], rse_id=self.rse1_id, nowait=False)

    # Also flip the rse1 replicas to AVAILABLE directly in the database.
    session = get_session()
    for a_file in files:
        replica = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=a_file['name'], rse_id=self.rse1_id).one()
        replica.state = ReplicaState.AVAILABLE
    session.commit()

    rule_repairer(once=True)

    # The first rule is satisfied; the second can now start replicating from rse1.
    assert(RuleState.OK == get_rule(rule_id1)['state'])
    assert(RuleState.REPLICATING == get_rule(rule_id2)['state'])
def test_to_repair_a_rule_with_only_1_rse_whose_transfers_failed(self):
    """ JUDGE REPAIRER: Test to repair a rule with only 1 rse whose transfers failed (lock)"""
    rule_repairer(once=True)  # Clean out the repairer
    scope = InternalScope('mock', **self.vo)
    files = create_files(4, scope, self.rse4_id, bytes=100)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.DATASET, self.jdoe)
    attach_dids(scope, dataset, files, self.jdoe)

    rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1,
                       rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=None,
                       locked=False, subscription_id=None)[0]

    # Each transfer/cancellation targets the RSE holding that file's OWN lock.
    # (The previous copy-paste used files[2]'s name in the lock lookups for files 0, 1 and 3;
    # under DATASET grouping all locks share one RSE so the result is the same, but this is
    # now consistent and robust.)
    successful_transfer(scope=scope, name=files[0]['name'], rse_id=get_replica_locks(scope=files[0]['scope'], name=files[0]['name'])[0].rse_id, nowait=False)
    successful_transfer(scope=scope, name=files[1]['name'], rse_id=get_replica_locks(scope=files[1]['scope'], name=files[1]['name'])[0].rse_id, nowait=False)
    failed_transfer(scope=scope, name=files[2]['name'], rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)
    failed_transfer(scope=scope, name=files[3]['name'], rse_id=get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
    cancel_request_did(scope=scope, name=files[2]['name'], dest_rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)
    cancel_request_did(scope=scope, name=files[3]['name'], dest_rse_id=get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)

    assert(rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
    assert(RuleState.STUCK == get_rule(rule_id)['state'])
    rule_repairer(once=True)
    # Still assert STUCK because of delays:
    assert(RuleState.STUCK == get_rule(rule_id)['state'])
    # Both failed files must stay locked to the same RSE (DATASET grouping).
    assert(get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id == get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
def test_to_repair_a_rule_with_only_1_rse_whose_transfers_failed(self):
    """ JUDGE REPAIRER: Test to repair a rule with only 1 rse whose transfers failed (lock)"""
    rule_repairer(once=True)  # Clean out the repairer
    scope = 'mock'
    files = create_files(4, scope, self.rse4, bytes=100)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
    attach_dids(scope, dataset, files, 'jdoe')

    rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1,
                       rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=None,
                       locked=False, subscription_id=None)[0]

    # Each transfer/cancellation targets the RSE holding that file's OWN lock.
    # (The previous copy-paste used files[2]'s name in the lock lookups for files 0, 1 and 3;
    # under DATASET grouping all locks share one RSE so the result is the same, but this is
    # now consistent and robust.)
    successful_transfer(scope=scope, name=files[0]['name'], rse_id=get_replica_locks(scope=files[0]['scope'], name=files[0]['name'])[0].rse_id, nowait=False)
    successful_transfer(scope=scope, name=files[1]['name'], rse_id=get_replica_locks(scope=files[1]['scope'], name=files[1]['name'])[0].rse_id, nowait=False)
    failed_transfer(scope=scope, name=files[2]['name'], rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)
    failed_transfer(scope=scope, name=files[3]['name'], rse_id=get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
    cancel_request_did(scope=scope, name=files[2]['name'], dest_rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)
    cancel_request_did(scope=scope, name=files[3]['name'], dest_rse_id=get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)

    assert(rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
    assert(RuleState.STUCK == get_rule(rule_id)['state'])
    rule_repairer(once=True)
    # Still assert STUCK because of delays:
    assert(RuleState.STUCK == get_rule(rule_id)['state'])
    # Both failed files must stay locked to the same RSE (DATASET grouping).
    assert(get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id == get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
def test_to_repair_a_rule_with_NONE_grouping_whose_transfer_failed(self):
    """ JUDGE REPAIRER: Test to repair a rule with 1 failed transfer (lock)"""
    rule_repairer(once=True)  # Clean out the repairer
    scope = InternalScope('mock', **self.vo)
    files = create_files(3, scope, self.rse4_id, bytes=100)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.DATASET, self.jdoe)
    attach_dids(scope, dataset, files, self.jdoe)

    rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1,
                       rse_expression=self.T1, grouping='NONE', weight=None, lifetime=None,
                       locked=False, subscription_id=None)[0]

    failed_rse_id = get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id
    assert(get_replica(scope=files[2]['scope'], name=files[2]['name'], rse_id=failed_rse_id)['state'] == ReplicaState.COPYING)
    assert(get_replica(scope=files[2]['scope'], name=files[2]['name'], rse_id=failed_rse_id)['lock_cnt'] == 1)

    # With grouping='NONE' each file's lock can be on a DIFFERENT RSE, so the transfer of a
    # file must be recorded at the RSE of that file's own lock.  (The previous lookups used
    # files[2]'s name for files 0 and 1, which is only correct when all locks happen to land
    # on the same RSE.)
    successful_transfer(scope=scope, name=files[0]['name'], rse_id=get_replica_locks(scope=files[0]['scope'], name=files[0]['name'])[0].rse_id, nowait=False)
    successful_transfer(scope=scope, name=files[1]['name'], rse_id=get_replica_locks(scope=files[1]['scope'], name=files[1]['name'])[0].rse_id, nowait=False)
    # failed_rse_id was captured from the same lookup above, so reuse it directly.
    failed_transfer(scope=scope, name=files[2]['name'], rse_id=failed_rse_id)

    assert(rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
    assert(RuleState.STUCK == get_rule(rule_id)['state'])
    rule_repairer(once=True)
    assert(RuleState.REPLICATING == get_rule(rule_id)['state'])
    # The failed replica must be marked UNAVAILABLE and have its lock released.
    assert(get_replica(scope=files[2]['scope'], name=files[2]['name'], rse_id=failed_rse_id)['state'] == ReplicaState.UNAVAILABLE)
    assert(get_replica(scope=files[2]['scope'], name=files[2]['name'], rse_id=failed_rse_id)['lock_cnt'] == 0)
def test_to_repair_a_rule_with_DATASET_grouping_whose_transfer_failed(self):
    """ JUDGE REPAIRER: Test to repair a rule with 1 failed transfer (lock)"""
    rule_repairer(once=True)  # Clean out the repairer
    scope = InternalScope('mock')
    files = create_files(4, scope, self.rse4_id, bytes=100)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.from_sym('DATASET'), self.jdoe)
    attach_dids(scope, dataset, files, self.jdoe)

    rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1,
                       rse_expression=self.T1, grouping='DATASET', weight=None, lifetime=None,
                       locked=False, subscription_id=None, activity='DebugJudge')[0]

    # Each transfer targets the RSE holding that file's OWN lock.  (Previous copy-paste used
    # files[2]'s name in the lookups for files 0 and 1; equivalent under DATASET grouping,
    # but now consistent.)
    successful_transfer(scope=scope, name=files[0]['name'], rse_id=get_replica_locks(scope=files[0]['scope'], name=files[0]['name'])[0].rse_id, nowait=False)
    successful_transfer(scope=scope, name=files[1]['name'], rse_id=get_replica_locks(scope=files[1]['scope'], name=files[1]['name'])[0].rse_id, nowait=False)
    failed_transfer(scope=scope, name=files[2]['name'], rse_id=get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id)
    failed_transfer(scope=scope, name=files[3]['name'], rse_id=get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)

    assert (rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
    assert (RuleState.STUCK == get_rule(rule_id)['state'])
    rule_repairer(once=True)
    assert (RuleState.REPLICATING == get_rule(rule_id)['state'])
    # DATASET grouping: after repair all locks must sit on one and the same RSE.
    assert (get_replica_locks(scope=files[2]['scope'], name=files[2]['name'])[0].rse_id == get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
    assert (get_replica_locks(scope=files[1]['scope'], name=files[1]['name'])[0].rse_id == get_replica_locks(scope=files[3]['scope'], name=files[3]['name'])[0].rse_id)
def test_repair_a_rule_with_source_replica_expression(self):
    """ JUDGE REPAIRER: Test repairing two rules where the second one has a source_replica_expression"""
    scope = 'mock'
    files = create_files(3, scope, self.rse4)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
    attach_dids(scope, dataset, files, 'jdoe')

    # First rule: plain replication of the dataset to rse1.
    rule_id1 = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1,
                        rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=None,
                        locked=False, subscription_id=None)[0]
    # Second rule: replication to rse3, but sources restricted to rse1.
    rule_id2 = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1,
                        rse_expression=self.rse3, grouping='DATASET', weight=None, lifetime=None,
                        locked=False, subscription_id=None, source_replica_expression=self.rse1)[0]

    # No replicas exist on rse1 yet, so the source-restricted rule is STUCK.
    assert(RuleState.REPLICATING == get_rule(rule_id1)['state'])
    assert(RuleState.STUCK == get_rule(rule_id2)['state'])

    # Complete the transfers feeding the first rule.
    for a_file in files:
        successful_transfer(scope=scope, name=a_file['name'], rse_id=self.rse1_id, nowait=False)

    # Also flip the rse1 replicas to AVAILABLE directly in the database.
    session = get_session()
    for a_file in files:
        replica = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=a_file['name'], rse_id=self.rse1_id).one()
        replica.state = ReplicaState.AVAILABLE
    session.commit()

    rule_repairer(once=True)

    # The first rule is satisfied; the second can now start replicating from rse1.
    assert(RuleState.OK == get_rule(rule_id1)['state'])
    assert(RuleState.REPLICATING == get_rule(rule_id2)['state'])
def test_judge_inject_rule(self):
    """ JUDGE INJECTOR: Test the judge when injecting a rule"""
    scope = 'mock'
    files = create_files(3, scope, self.rse1_id)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
    attach_dids(scope, dataset, files, 'jdoe')

    # An asynchronous rule must start out in the INJECT state.
    rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=2,
                       rse_expression=self.T1, grouping='DATASET', weight=None, lifetime=None,
                       locked=False, subscription_id=None, asynchronous=True)[0]
    assert (get_rule(rule_id)['state'] == RuleState.INJECT)

    rule_injector(once=True)

    # After injection every file carries two locks and the rule replicates.
    for file in files:
        assert (len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)
    assert (get_rule(rule_id)['state'] == RuleState.REPLICATING)
def perm_del_rule(issuer, kwargs):
    """
    Checks if an issuer can delete a replication rule.

    :param issuer: Account identifier which issues the command.
    :param kwargs: List of arguments for the action; must contain 'rule_id'.
    :returns: True if account is allowed to call the API call, otherwise False
    """
    # Privileged accounts can always delete.
    if issuer == 'root' or issuer == 'ddmadmin':
        return True

    # Fetch the rule once and reuse it (the original fetched it twice).
    rule = get_rule(rule_id=kwargs['rule_id'])

    # The rule owner may delete their own rule.
    if rule['account'] == issuer:
        return True

    # Collect the countries for which the issuer is a country admin.
    admin_in_country = [kv['key'].partition('-')[2]
                        for kv in list_account_attributes(account=issuer)
                        if kv['key'].startswith('country-') and kv['value'] == 'admin']

    rses = parse_expression(rule['rse_expression'])

    # Country admins may delete rules touching RSEs of their country.
    if admin_in_country:
        for rse in rses:
            if list_rse_attributes(rse=None, rse_id=rse['id']).get('country') in admin_in_country:
                return True

    # Accounts listed in an RSE's 'rule_deleters' attribute may delete as well.
    for rse in rses:
        rse_attr = list_rse_attributes(rse=None, rse_id=rse['id'])
        deleters = rse_attr.get('rule_deleters')
        if deleters and issuer in deleters.split(','):
            return True

    return False
def test_bb8_rebalance_rule(self):
    """ BB8: Test the rebalance rule method"""
    scope = InternalScope('mock', **self.vo)
    files = create_files(3, scope, self.rse1_id)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.from_sym('DATASET'), self.jdoe)
    attach_dids(scope, dataset, files, self.jdoe)

    rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1,
                       rse_expression=self.rse1, grouping='NONE', weight='fakeweight',
                       lifetime=None, locked=False, subscription_id=None)[0]

    rule = {}
    try:
        rule = get_rule(rule_id)
    except RuleNotFound:  # narrowed from a bare 'except:' which hid unrelated failures
        assert_raises(RuleNotFound, get_rule, rule_id)

    child_rule = rebalance_rule(rule, 'Rebalance', self.rse3, priority=3)
    rule_cleaner(once=True)
    # The rebalanced parent rule must be expired and chained to the child rule.
    assert(get_rule(rule_id)['expires_at'] <= datetime.utcnow())
    assert(get_rule(rule_id)['child_rule_id'] == child_rule)
    rule_cleaner(once=True)
    assert(get_rule(rule_id)['expires_at'] <= datetime.utcnow())

    successful_transfer(scope=scope, name=files[0]['name'], rse_id=self.rse3_id, nowait=False)
    successful_transfer(scope=scope, name=files[1]['name'], rse_id=self.rse3_id, nowait=False)
    # The parent cannot be deleted while the child is still replicating.
    with assert_raises(UnsupportedOperation):
        delete_rule(rule_id)
    successful_transfer(scope=scope, name=files[2]['name'], rse_id=self.rse3_id, nowait=False)

    rule_cleaner(once=True)
    assert(get_rule(child_rule)['state'] == RuleState.OK)
def test_judge_inject_delayed_rule(self):
    """ JUDGE INJECTOR: Test the judge when injecting a delayed rule"""
    scope = InternalScope('mock', **self.vo)
    files = create_files(1, scope, self.rse1_id)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.DATASET, self.jdoe)
    attach_dids(scope, dataset, files, self.jdoe)
    [file] = files

    # Add a rule delayed by one hour.
    rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=2,
                       rse_expression=self.T1, grouping='DATASET', weight=None, lifetime=None,
                       locked=False, subscription_id=None, delay_injection=3600)[0]
    rule = get_rule(rule_id)
    assert rule['state'] == RuleState.INJECT
    # created_at is shifted into the future to encode the injection delay.
    assert rule['updated_at'] < rule['created_at']
    assert datetime.utcnow() + timedelta(seconds=3550) < rule['created_at'] < datetime.utcnow() + timedelta(seconds=3650)

    # Injection time has not arrived yet: the injector skips the rule, no locks appear.
    rule_injector(once=True)
    assert get_rule(rule_id)['state'] == RuleState.INJECT
    assert not get_replica_locks(scope=file['scope'], name=file['name'])

    # Simulate that the time to inject the rule has arrived.
    @transactional_session
    def __make_injection_due(session=None):
        session.query(ReplicationRule).filter_by(id=rule_id).one().created_at = datetime.utcnow()
    __make_injection_due()

    # Now the injector must create the locks.
    rule_injector(once=True)
    assert get_rule(rule_id)['state'] == RuleState.REPLICATING
    assert len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2
def test_to_repair_a_rule_with_only_1_rse_whose_site_is_blocklisted(self):
    """ JUDGE REPAIRER: Test to repair a rule with only 1 rse whose site is blocklisted"""
    # Create a fresh RSE that is closed for writing.
    rse = rse_name_generator()
    rse_id = add_rse(rse, **self.vo)
    update_rse(rse_id, {'availability_write': False})
    set_local_account_limit(self.jdoe, rse_id, -1)

    rule_repairer(once=True)  # Clean out the repairer

    scope = InternalScope('mock', **self.vo)
    files = create_files(4, scope, self.rse4_id, bytes=100)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.DATASET, self.jdoe)
    attach_dids(scope, dataset, files, self.jdoe)

    rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1,
                       rse_expression=rse, grouping='DATASET', weight=None, lifetime=None,
                       locked=False, subscription_id=None, ignore_availability=True,
                       activity='DebugJudge')[0]
    assert(RuleState.STUCK == get_rule(rule_id)['state'])

    rule_repairer(once=True)
    # Still assert STUCK because of ignore_availability:
    assert(RuleState.STUCK == get_rule(rule_id)['state'])

    # Drop the cached RSE availability and reopen the site for writing.
    region = make_region().configure('dogpile.cache.memcached',
                                     expiration_time=3600,
                                     arguments={'url': config_get('cache', 'url', False, '127.0.0.1:11211'),
                                                'distributed_lock': True})
    region.delete(sha256(rse.encode()).hexdigest())
    update_rse(rse_id, {'availability_write': True})

    rule_repairer(once=True)
    assert(RuleState.REPLICATING == get_rule(rule_id)['state'])
def test_bb8_rebalance_rule(vo, root_account, jdoe_account, rse_factory, mock_scope, did_factory):
    """BB8: Test the rebalance rule method"""
    rse1, rse1_id = rse_factory.make_posix_rse()
    rse2, rse2_id = rse_factory.make_posix_rse()

    # Add Tags
    T1 = tag_generator()
    T2 = tag_generator()
    add_rse_attribute(rse1_id, T1, True)
    add_rse_attribute(rse2_id, T2, True)

    # Add fake weights
    add_rse_attribute(rse1_id, "fakeweight", 10)
    add_rse_attribute(rse2_id, "fakeweight", 0)

    # Add quota
    set_local_account_limit(jdoe_account, rse1_id, -1)
    set_local_account_limit(jdoe_account, rse2_id, -1)
    set_local_account_limit(root_account, rse1_id, -1)
    set_local_account_limit(root_account, rse2_id, -1)

    files = create_files(3, mock_scope, rse1_id)
    dataset = did_factory.make_dataset()
    attach_dids(mock_scope, dataset['name'], files, jdoe_account)
    set_status(mock_scope, dataset['name'], open=False)

    # Invalidate the cache because the result of parse_expression is cached
    REGION.invalidate()

    rule_id = add_rule(dids=[{'scope': mock_scope, 'name': dataset['name']}], account=jdoe_account,
                       copies=1, rse_expression=rse1, grouping='NONE', weight='fakeweight',
                       lifetime=None, locked=False, subscription_id=None)[0]
    rule = {}
    try:
        rule = get_rule(rule_id)
    except RuleNotFound:  # narrowed from a bare 'except:' which hid unrelated failures
        pytest.raises(RuleNotFound, get_rule, rule_id)

    child_rule = rebalance_rule(rule, 'Rebalance', rse2, priority=3)
    rule_cleaner(once=True)
    # The rebalanced parent rule must be expired and chained to the child rule.
    assert(get_rule(rule_id)['expires_at'] <= datetime.utcnow())
    assert(get_rule(rule_id)['child_rule_id'] == child_rule)
    rule_cleaner(once=True)
    assert(get_rule(rule_id)['expires_at'] <= datetime.utcnow())

    successful_transfer(scope=mock_scope, name=files[0]['name'], rse_id=rse2_id, nowait=False)
    successful_transfer(scope=mock_scope, name=files[1]['name'], rse_id=rse2_id, nowait=False)
    # The parent cannot be deleted while the child is still replicating.
    with pytest.raises(UnsupportedOperation):
        delete_rule(rule_id)
    successful_transfer(scope=mock_scope, name=files[2]['name'], rse_id=rse2_id, nowait=False)

    rule_cleaner(once=True)
    assert(get_rule(child_rule)['state'] == RuleState.OK)
set_metadata(mock_scope, dataset['name'], 'lifetime', -86400) undertaker.run(once=True)
def test_add_rule_with_r2d2_container_treating_and_duplicate_rule(self):
    """ JUDGE INJECTOR (CORE): Add a replication rule with an r2d2 container treatment and duplicate rule"""
    scope = 'mock'
    container = 'asdf.r2d2_request.2016-04-01-15-00-00.ads.' + str(uuid())
    add_did(scope, container, DIDType.from_sym('CONTAINER'), 'jdoe')

    # Build three datasets inside the container, each with its own (duplicate) dataset rule.
    datasets = []
    for _ in range(3):
        files = create_files(3, scope, self.rse1_id)
        dataset = 'dataset_' + str(uuid())
        datasets.append(dataset)
        add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
        attach_dids(scope, dataset, files, 'jdoe')
        attach_dids(scope, container, [{'scope': scope, 'name': dataset}], 'jdoe')
        add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1,
                 rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=900,
                 locked=False, subscription_id=None, ask_approval=False)

    # A container-level rule that needs approval.
    rule_id = add_rule(dids=[{'scope': scope, 'name': container}], account='jdoe', copies=1,
                       rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=900,
                       locked=False, subscription_id=None, ask_approval=True)[0]
    approve_rule(rule_id, approver='root')
    assert (get_rule(rule_id)['state'] == RuleState.INJECT)

    rule_injector(once=True)

    # The container rule is split into per-dataset rules and disappears itself.
    with assert_raises(RuleNotFound):
        get_rule(rule_id)
    for dataset in datasets:
        assert (len([r for r in list_rules({'scope': scope, 'name': dataset})]) > 0)
def perm_approve_rule(issuer, kwargs, session=None):
    """
    Checks if an issuer can approve a replication rule.

    :param issuer: Account identifier which issues the command.
    :param kwargs: List of arguments for the action; must contain 'rule_id'.
    :param session: The DB session to use
    :returns: True if account is allowed to call the API call, otherwise False
    """
    # Root and admin accounts may approve anything.
    if _is_root(issuer) or has_account_attribute(account=issuer, key='admin', session=session):
        return True

    # Otherwise, only accounts listed in an RSE's 'rule_approvers' attribute may approve,
    # for RSEs matched by the rule's expression within the issuer's VO.
    rule = get_rule(rule_id=kwargs['rule_id'])
    for rse in parse_expression(rule['rse_expression'], filter_={'vo': issuer.vo}, session=session):
        approvers = list_rse_attributes(rse_id=rse['id'], session=session).get('rule_approvers', None)
        if approvers and issuer.external in approvers.split(','):
            return True
    return False
def get_replication_rule(rule_id):
    """
    Get a replication rule by its id.

    :param rule_id: The rule_id to get.
    :returns: The rule as returned by the core layer.
    """
    # Thin pass-through to the core rule module.
    return rule.get_rule(rule_id)
def test_judge_deny_rule(self):
    """ JUDGE INJECTOR: Test the judge when asking approval for a rule and denying it"""
    scope = 'mock'
    files = create_files(3, scope, self.rse1_id)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
    attach_dids(scope, dataset, files, 'jdoe')

    # A rule asking for approval starts in WAITING_APPROVAL.
    rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1,
                       rse_expression=self.rse4, grouping='DATASET', weight=None, lifetime=None,
                       locked=False, subscription_id=None, ask_approval=True)[0]
    assert (get_rule(rule_id)['state'] == RuleState.WAITING_APPROVAL)

    # Denying the rule removes it entirely.
    deny_rule(rule_id=rule_id, approver='root')
    assert_raises(RuleNotFound, get_rule, rule_id)
def select_algorithm(algorithm, rule_ids, params):
    """
    Method used in case of chained subscriptions

    :param algorithm: Algorithm used for the chained rule. Now only associated_site
                      associated_site : Choose an associated endpoint according to the RSE attribute assoiciated_site
    :param rule_ids: List of parent rules
    :param params: List of rules parameters to be used by the algorithm
    :returns: Dictionary mapping the selected site to its source_replica_expression/weight.
    :raises SubscriptionWrongParameter: On missing/out-of-range associated_site_idx or
                                        unsupported rule configuration.
    """
    selected_rses = {}
    if algorithm == 'associated_site':
        for rule_id in rule_ids:
            rule = get_rule(rule_id)
            logging.debug('In select_algorithm, %s', str(rule))
            rse = rule['rse_expression']
            vo = rule['account'].vo
            if rse_exists(rse, vo=vo):
                rse_id = get_rse_id(rse, vo=vo)
                rse_attributes = list_rse_attributes(rse_id)
                associated_sites = rse_attributes.get('associated_sites', None)
                associated_site_idx = params.get('associated_site_idx', None)
                if not associated_site_idx:
                    raise SubscriptionWrongParameter('Missing parameter associated_site_idx')
                if associated_sites:
                    associated_sites = associated_sites.split(',')
                    # associated_site_idx is 1-based, so the valid range is
                    # 1..len(associated_sites).  The previous check used
                    # "> len(associated_sites) + 1", which let idx == len + 1
                    # slip through and raise IndexError on the lookup below.
                    if associated_site_idx > len(associated_sites):
                        raise SubscriptionWrongParameter('Parameter associated_site_idx is out of range')
                    associated_site = associated_sites[associated_site_idx - 1]
                    selected_rses[associated_site] = {'source_replica_expression': rse, 'weight': None}
            else:
                raise SubscriptionWrongParameter('Algorithm associated_site only works with split_rule')
            if rule['copies'] != 1:
                raise SubscriptionWrongParameter('Algorithm associated_site only works with split_rule')
    return selected_rses
def get_replication_rule(rule_id, estimate_ttc=None):
    """
    Get a replication rule by its id.

    :param rule_id: The rule_id to get.
    :param estimate_ttc: Forwarded verbatim to the core layer
                         (presumably a time-to-complete estimation flag — confirm in core).
    :returns: The rule as returned by the core layer.
    """
    # Thin pass-through to the core rule module.
    return rule.get_rule(rule_id, estimate_ttc)
def get_replication_rule(rule_id, estimate_ttc=None):
    """
    Get a replication rule by its id.

    :param rule_id: The rule_id to get.
    :param estimate_ttc: Forwarded verbatim to the core layer
                         (presumably a time-to-complete estimation flag — confirm in core).
    :returns: The rule dictionary, with internal types converted for the API layer.
    """
    # Fetch from the core, then normalize internal types for API consumers.
    core_rule = rule.get_rule(rule_id, estimate_ttc)
    return api_update_return_dict(core_rule)
def perm_access_rule_vo(issuer, kwargs):
    """
    Checks if we're at the same VO as the rule_id's

    :param issuer: Account identifier which issues the command.
    :param kwargs: List of arguments for the action; must contain 'rule_id'.
    :returns: True if account is allowed, otherwise False
    """
    # The VO of the rule's scope must match the issuer's VO.
    rule_vo = get_rule(kwargs['rule_id'])['scope'].vo
    return rule_vo == issuer.vo
def perm_update_rule(issuer, kwargs):
    """
    Checks if an issuer can update a replication rule.

    :param issuer: Account identifier which issues the command.
    :param kwargs: List of arguments for the action; must contain 'rule_id' and 'options'.
    :returns: True if account is allowed to call the API call, otherwise False
    """
    # Admin accounts can do everything
    if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
        return True

    # Only admin accounts can change account, state, priority of a rule
    if 'account' in kwargs['options'] or\
       'state' in kwargs['options'] or\
       'priority' in kwargs['options'] or\
       'child_rule_id' in kwargs['options'] or\
       'meta' in kwargs['options']:
        return False  # Only priv accounts are allowed to change that

    # Country admins are allowed to change the rest.
    admin_in_country = []
    for kv in list_account_attributes(account=issuer):
        if kv['key'].startswith('country-') and kv['value'] == 'admin':
            admin_in_country.append(kv['key'].partition('-')[2])

    # Fetch the rule once and reuse it for both the country check and the
    # owner check below (the original issued a second, redundant get_rule).
    rule = get_rule(rule_id=kwargs['rule_id'])
    rses = parse_expression(rule['rse_expression'])
    if admin_in_country:
        for rse in rses:
            if list_rse_attributes(rse=None, rse_id=rse['id']).get('country') in admin_in_country:
                return True

    # Only admin and country-admin are allowed to change locked state of rule
    if 'locked' in kwargs['options']:
        return False

    # Owner can change the rest of a rule
    if rule['account'] == issuer:
        return True

    return False
def test_atropos(root_account, rse_factory, mock_scope, did_factory, rucio_client):
    """ Test the behaviour of atropos """
    today = datetime.now()
    check_date = datetime.now() + timedelta(days=365)
    check_date = check_date.isoformat().split('T')[0]

    # Define a lifetime-model policy on disk and drop the cached one.
    lifetime_dir = '/opt/rucio/etc/policies'
    os.makedirs('/opt/rucio/etc/policies', exist_ok=True)
    lifetime_policy = [{'name': 'Test', 'include': {'datatype': ['RAW'], 'project': ['data%']}, 'age': '6', 'extension': '1'}]
    with open('%s/config_other.json' % lifetime_dir, 'w') as outfile:
        json.dump(lifetime_policy, outfile)
    REGION.invalidate()

    nb_datasets = 2
    today = datetime.now()  # refresh the reference time right before rule creation
    rse, rse_id = rse_factory.make_posix_rse()
    datasets = [did_factory.make_dataset() for _ in range(nb_datasets)]
    rules = list()
    expiration_date = None

    # Check that eol_at is properly set:
    # the rule on dataset 0 (which matches the policy) must get an eol_at,
    # the rule on dataset 1 (which does not match) must not.
    for cnt, dataset in enumerate(datasets):
        if cnt == 0:
            set_metadata(dataset['scope'], dataset['name'], 'datatype', 'RAW')
            set_metadata(dataset['scope'], dataset['name'], 'project', 'data')
        rule_ids = add_rule(dids=[{'scope': dataset['scope'], 'name': dataset['name']}],
                            account=root_account, copies=1, rse_expression=rse,
                            grouping='DATASET', weight=None, lifetime=None, locked=None,
                            subscription_id=None)
        rules.append(rule_ids[0])
        rule = get_rule(rule_ids[0])
        if cnt == 0:
            expiration_date = rule['eol_at']
            assert expiration_date is not None
            # The policy's age/extension should put eol_at roughly 180 days out.
            assert expiration_date - today < timedelta(181)
            assert expiration_date - today > timedelta(179)
        else:
            assert rule['eol_at'] is None

    # Run atropos in dry-run mode: it must copy eol_at onto the matching
    # dataset's metadata and leave the non-matching one untouched.
    atropos(thread=1, bulk=100, date_check=datetime.strptime(check_date, '%Y-%m-%d'),
            dry_run=True, grace_period=86400, once=True, unlock=False, spread_period=0,
            purge_replicas=False, sleep_time=60)
    for cnt, dataset in enumerate(datasets):
        meta = get_metadata(dataset['scope'], dataset['name'])
        if cnt == 0:
            assert meta['eol_at'] is not None
            assert meta['eol_at'] == expiration_date
        else:
            assert meta['eol_at'] is None

    # Clean-up
    os.remove('/opt/rucio/etc/policies/config_other.json')
def test_judge_evaluate_detach(self):
    """ JUDGE EVALUATOR: Test if the detach is done correctly"""
    re_evaluator(once=True)

    scope = InternalScope('mock', **self.vo)
    container = 'container_' + str(uuid())
    add_did(scope, container, DIDType.from_sym('CONTAINER'), self.jdoe)

    # Build three datasets of three files each and attach them all to the container.
    # After the loop, 'files' and 'dataset' refer to the last dataset created,
    # which is the one detached from below.
    files = []
    dataset = None
    for _ in range(3):
        files = create_files(3, scope, self.rse1_id, bytes=100)
        dataset = 'dataset_' + str(uuid())
        add_did(scope, dataset, DIDType.from_sym('DATASET'), self.jdoe)
        attach_dids(scope, dataset, files, self.jdoe)
        attach_dids(scope, container, [{'scope': scope, 'name': dataset}], self.jdoe)

    # Add a first rule to the Container
    rule_id = add_rule(dids=[{'scope': scope, 'name': container}], account=self.jdoe, copies=1,
                       rse_expression=self.rse1, grouping='ALL', weight=None, lifetime=None,
                       locked=False, subscription_id=None)[0]

    # Fake judge
    re_evaluator(once=True)
    assert(9 == get_rule(rule_id)['locks_ok_cnt'])

    detach_dids(scope, dataset, [files[0]])

    # Fake judge
    re_evaluator(once=True)
    assert(8 == get_rule(rule_id)['locks_ok_cnt'])
def test_get_rule(self):
    """ REPLICATION RULE (CORE): Test to get a previously created rule"""
    scope = 'mock'
    files = create_files(3, scope, self.rse1)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
    attach_dids(scope, dataset, files, 'jdoe')

    rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=2,
                       rse_expression=self.T1, grouping='NONE', weight='fakeweight',
                       lifetime=None, locked=False, subscription_id=None)[0]

    # The returned id (hyphenated UUID) must round-trip to the created rule_id.
    assert(rule_id == get_rule(rule_id)['id'].replace('-', '').lower())
    # A random id must not resolve to a rule.
    assert_raises(RuleNotFound, get_rule, uuid())
def perm_move_rule(issuer, kwargs, session=None):
    """
    Checks if an issuer can move a replication rule.

    :param issuer: Account identifier which issues the command.
    :param kwargs: List of arguments for the action; must contain 'rule_id'.
    :param session: The DB session to use
    :returns: True if account is allowed to call the API call, otherwise False
    """
    # Default permission or a rule_admin attribute grants access outright.
    if perm_default(issuer, kwargs, session=session):
        return True
    if has_account_attribute(account=issuer, key='rule_admin', session=session):
        return True
    # Otherwise only the rule owner may move it.
    return get_rule(kwargs['rule_id'], session=session)['account'] == issuer
def get_replication_rule(rule_id, issuer, estimate_ttc=None, vo='def'):
    """
    Get a replication rule by its id.

    :param rule_id: The rule_id to get.
    :param issuer: The issuing account of this operation.
    :param estimate_ttc: Forwarded verbatim to the core layer
                         (presumably a time-to-complete estimation flag — confirm in core).
    :param vo: The VO of the issuer.
    :raises AccessDenied: If the issuer may not access rules of other VOs.
    """
    # Cross-VO access is gated by the 'access_rule_vo' permission.
    kwargs = {'rule_id': rule_id}
    if not has_permission(issuer=issuer, vo=vo, action='access_rule_vo', kwargs=kwargs):
        raise AccessDenied('Account %s can not access rules at other VOs.' % (issuer))
    core_rule = rule.get_rule(rule_id, estimate_ttc)
    return api_update_return_dict(core_rule)
def perm_del_rule(issuer, kwargs):
    """
    Checks if an issuer can delete a replication rule.

    :param issuer: Account identifier which issues the command.
    :param kwargs: List of arguments for the action; must contain 'rule_id'.
    :returns: True if account is allowed to call the API call, otherwise False
    """
    # Root and admin accounts may delete any rule; otherwise only the owner may.
    if _is_root(issuer) or has_account_attribute(account=issuer, key='admin'):
        return True
    return get_rule(kwargs['rule_id'])['account'] == issuer
def test_judge_ask_approval(self):
    """ JUDGE INJECTOR: Test the judge when asking approval for a rule"""
    scope = InternalScope('mock')
    files = create_files(3, scope, self.rse1_id)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.from_sym('DATASET'), self.jdoe)
    attach_dids(scope, dataset, files, self.jdoe)

    # Add a first rule to the DS
    rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account=self.jdoe, copies=1, rse_expression=self.rse4, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None, ask_approval=True)[0]

    # Rule must wait for approval before anything happens.
    assert(get_rule(rule_id)['state'] == RuleState.WAITING_APPROVAL)
    # Approving moves it to INJECT; the injector then picks it up.
    approve_rule(rule_id=rule_id, approver=self.jdoe)
    assert(get_rule(rule_id)['state'] == RuleState.INJECT)
    rule_injector(once=True)

    # Check if the Locks are created properly
    for file in files:
        assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 1)
    assert(get_rule(rule_id)['state'] == RuleState.REPLICATING)
def perm_update_rule(issuer, kwargs):
    """
    Checks if an issuer can update a replication rule.

    :param issuer: Account identifier which issues the command.
    :param kwargs: List of arguments for the action.
    :returns: True if account is allowed to call the API call, otherwise False
    """
    # Root and special accounts may update anything.
    if issuer == 'root':
        return True
    if issuer in get_special_accounts():
        return True
    # Only privileged accounts are allowed to change the owner of a rule.
    if 'account' in kwargs['options']:
        return False
    # Otherwise the owner of the rule may update it.
    return get_rule(kwargs['rule_id'])['account'] == issuer
def perm_approve_rule(issuer, kwargs):
    """
    Checks if an issuer can approve a replication rule.

    :param issuer: Account identifier which issues the command.
    :param kwargs: List of arguments for the action.
    :returns: True if account is allowed to call the API call, otherwise False
    """
    # Admin accounts can do everything
    if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
        return True

    rule = get_rule(rule_id=kwargs['rule_id'])
    rses = parse_expression(rule['rse_expression'])

    # Accounts listed in the 'rule_approvers' RSE attribute can approve the rule.
    for rse in rses:
        rse_attr = list_rse_attributes(rse=rse['rse'])
        if rse_attr.get('rule_approvers') and issuer in rse_attr.get('rule_approvers').split(','):
            return True

    # LOCALGROUPDISK/LOCALGROUPTAPE admins can approve the rule
    # (country admins are accounts carrying a 'country-<cc>: admin' attribute).
    admin_in_country = [kv['key'].partition('-')[2]
                        for kv in list_account_attributes(account=issuer)
                        if kv['key'].startswith('country-') and kv['value'] == 'admin']
    if admin_in_country:
        for rse in rses:
            rse_attr = list_rse_attributes(rse=rse['rse'])
            if rse_attr.get('type', '') in ('LOCALGROUPDISK', 'LOCALGROUPTAPE') and rse_attr.get('country', '') in admin_in_country:
                return True

    # GROUPDISK admins can approve the rule
    # (physics-group admins carry a 'group-<name>: admin' attribute).
    admin_for_phys_group = [kv['key'].partition('-')[2]
                            for kv in list_account_attributes(account=issuer)
                            if kv['key'].startswith('group-') and kv['value'] == 'admin']
    if admin_for_phys_group:
        for rse in rses:
            rse_attr = list_rse_attributes(rse=rse['rse'])
            if rse_attr.get('type', '') == 'GROUPDISK' and rse_attr.get('physgroup', '') in admin_for_phys_group:
                return True

    return False
def perm_del_rule(issuer, kwargs):
    """
    Checks if an issuer can delete a replication rule.

    :param issuer: Account identifier which issues the command.
    :param kwargs: List of arguments for the action.
    :returns: True if account is allowed to call the API call, otherwise False
    """
    # Privileged accounts may always delete.
    if issuer in ('root', 'ddmadmin'):
        return True
    # The owner of a rule may delete it.
    if get_rule(kwargs['rule_id'])['account'] == issuer:
        return True
    # If issuer is a country admin
    # Resolve the list of rses the user is allowed to manage
    # Check if there is an overlap in this list and in lock.get_replica_locks_for_rule_id_per_rse(rule_id=rule_id)
    # return True
    return False
def perm_move_rule(issuer, kwargs):
    """
    Checks if an issuer can move a replication rule.

    :param issuer: Account identifier which issues the command.
    :param kwargs: List of arguments for the action.
    :returns: True if account is allowed to call the API call, otherwise False
    """
    # Admin accounts can do everything
    if issuer == 'root' or has_account_attribute(account=issuer, key='admin'):
        return True

    # Country admins are allowed to move a rule, but must be admin for the
    # original as well as the future location of the rule.
    admin_in_country = [kv['key'].partition('-')[2]
                        for kv in list_account_attributes(account=issuer)
                        if kv['key'].startswith('country-') and kv['value'] == 'admin']
    if not admin_in_country:
        return False

    def _administers_any(rses):
        # True when at least one RSE of the expression lies in an administered country.
        return any(list_rse_attributes(rse=None, rse_id=rse['id']).get('country') in admin_in_country
                   for rse in rses)

    admin_source = _administers_any(parse_expression(get_rule(rule_id=kwargs['rule_id'])['rse_expression']))
    admin_destination = _administers_any(parse_expression(kwargs['rse_expression']))
    return admin_source and admin_destination
def get_replication_rule(rule_id, issuer, vo='def', session=None):
    """
    Get replication rule by it's id.

    :param rule_id: The rule_id to get.
    :param issuer: The issuing account of this operation.
    :param vo: The VO of the issuer.
    :param session: The database session in use.
    """
    kwargs = {'rule_id': rule_id}
    # In a multi-VO deployment, accessing rules across VOs needs an explicit grant.
    if is_multi_vo(session=session):
        if not has_permission(issuer=issuer, vo=vo, action='access_rule_vo', kwargs=kwargs, session=session):
            raise AccessDenied('Account %s can not access rules at other VOs.' % (issuer))
    return api_update_return_dict(rule.get_rule(rule_id, session=session), session=session)
def select_algorithm(algorithm, rule_ids, params):
    """
    Method used in case of chained subscriptions

    :param algorithm: Algorithm used for the chained rule. Now only associated_site
                      associated_site : Choose an associated endpoint according to the RSE attribute assoiciated_site
    :param rule_ids: List of parent rules
    :param params: List of rules parameters to be used by the algorithm
    :returns: Dict mapping the selected associated site to its rule parameters
              (source_replica_expression / weight).
    :raises SubscriptionWrongParameter: On missing or out-of-range
              associated_site_idx, or when the parent rule is not a split_rule
              (copies != 1) on an existing RSE.
    """
    selected_rses = {}
    if algorithm == "associated_site":
        for rule_id in rule_ids:
            rule = get_rule(rule_id)
            logging.debug("In select_algorithm, %s", str(rule))
            rse = rule["rse_expression"]
            vo = rule["account"].vo
            if rse_exists(rse, vo=vo):
                rse_id = get_rse_id(rse, vo=vo)
                rse_attributes = list_rse_attributes(rse_id)
                associated_sites = rse_attributes.get("associated_sites", None)
                associated_site_idx = params.get("associated_site_idx", None)
                if not associated_site_idx:
                    raise SubscriptionWrongParameter(
                        "Missing parameter associated_site_idx")
                if associated_sites:
                    associated_sites = associated_sites.split(",")
                    # associated_site_idx is 1-based: valid values are
                    # 1..len(associated_sites). The previous check
                    # (idx > len + 1) let idx == len + 1 slip through and
                    # crash with IndexError below; negative values wrapped
                    # around silently. Both now raise the intended error.
                    if associated_site_idx < 1 or associated_site_idx > len(associated_sites):
                        raise SubscriptionWrongParameter(
                            "Parameter associated_site_idx is out of range")
                    associated_site = associated_sites[associated_site_idx - 1]
                    selected_rses[associated_site] = {
                        "source_replica_expression": rse,
                        "weight": None,
                    }
            else:
                raise SubscriptionWrongParameter(
                    "Algorithm associated_site only works with split_rule")
            if rule["copies"] != 1:
                raise SubscriptionWrongParameter(
                    "Algorithm associated_site only works with split_rule")
    return selected_rses
def delete_sync_rule(rule_id, session=None):
    """
    Delete a sync rule, marking its replicas unavailable before purging them.

    :param rule_id: The id of the rule to delete.
    :param session: The database session in use.
    :raises RuntimeError: If the rule is not on a dataset, or its RSE
                          expression does not resolve to a single RSE.
    """
    rule = get_rule(rule_id, session=session)
    if rule["did_type"] != DIDType.DATASET:
        raise RuntimeError("Rule applies to did with wrong type")
    block = rule["name"]
    try:
        # The rule must target exactly one RSE, i.e. its expression is an RSE name.
        rse_id = get_rse_id(rse=rule["rse_expression"], session=session)
    except RSENotFound:
        raise RuntimeError("Rule does not apply to a specific RSE")
    scope = rule["scope"]
    # Removed unused local `account` (was read from the rule but never used).
    # Mark every file replica of the dataset as unavailable ('U') on that RSE.
    files = [
        {"scope": scope, "name": file["name"], "rse_id": rse_id, "state": "U"}
        for file in list_files(scope, block, long=False, session=session)
    ]
    update_replicas_states(
        replicas=files, add_tombstone=False, session=session
    )
    delete_rule(rule_id=rule_id, purge_replicas=True, soft=False, session=session)
def test_repair_a_rule_with_missing_locks(self):
    """ JUDGE EVALUATOR: Test the judge when a rule gets STUCK from re_evaluating and there are missing locks"""
    scope = 'mock'
    files = create_files(3, scope, self.rse4)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')

    # Add a first rule to the DS
    rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=2, rse_expression=self.T1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]

    attach_dids(scope, dataset, files, 'jdoe')

    # Fake judge
    re_evaluator(once=True)

    # Check if the Locks are created properly
    for file in files:
        assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)

    # Add more files to the DID
    files2 = create_files(3, scope, self.rse4)
    attach_dids(scope, dataset, files2, 'jdoe')

    # Mark the rule STUCK to fake that the re-evaluation failed
    session = get_session()
    rule = session.query(models.ReplicationRule).filter_by(id=rule_id).one()
    rule.state = RuleState.STUCK
    session.commit()

    rule_repairer(once=True)

    # The repairer must have created the missing locks for the new files,
    # on the same two RSEs that hold the locks of the original files.
    for file in files:
        assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)
    for file in files2:
        assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)
        # Lock RSEs of the new files fully overlap with those of the first file.
        assert(len(set([lock.rse_id for lock in get_replica_locks(scope=files[0]['scope'], name=files[0]['name'])]).intersection(set([lock.rse_id for lock in get_replica_locks(scope=file['scope'], name=file['name'])]))) == 2)
    # 6 files * 2 copies = 12 replicating locks on the rule.
    assert(12 == get_rule(rule_id)['locks_replicating_cnt'])