def test_repair_a_rule_with_source_replica_expression(self):
    """ JUDGE EVALUATOR: Test the judge with two rules, where one has a source_replica_expression"""
    scope = 'mock'
    files = create_files(3, scope, self.rse4)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')
    attach_dids(scope, dataset, files, 'jdoe')

    # Add a first rule to the DS
    rule_id1 = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]
    # Add a second rule that may only copy from the RSE targeted by the first rule
    rule_id2 = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=1, rse_expression=self.rse3, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None, source_replica_expression=self.rse1)[0]

    assert(RuleState.REPLICATING == get_rule(rule_id1)['state'])
    assert(RuleState.STUCK == get_rule(rule_id2)['state'])

    successful_transfer(scope=scope, name=files[0]['name'], rse_id=self.rse1_id, nowait=False)
    successful_transfer(scope=scope, name=files[1]['name'], rse_id=self.rse1_id, nowait=False)
    successful_transfer(scope=scope, name=files[2]['name'], rse_id=self.rse1_id, nowait=False)

    # Also make replicas AVAILABLE
    session = get_session()
    replica = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=files[0]['name'], rse_id=self.rse1_id).one()
    replica.state = ReplicaState.AVAILABLE
    replica = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=files[1]['name'], rse_id=self.rse1_id).one()
    replica.state = ReplicaState.AVAILABLE
    replica = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=files[2]['name'], rse_id=self.rse1_id).one()
    replica.state = ReplicaState.AVAILABLE
    session.commit()

    rule_repairer(once=True)

    # With source replicas now AVAILABLE at rse1, the second rule can start replicating
    assert(RuleState.OK == get_rule(rule_id1)['state'])
    assert(RuleState.REPLICATING == get_rule(rule_id2)['state'])
def test_db_connection(self):
    """ DB (CORE): Test db connection """
    session = get_session()
    if session.bind.dialect.name == 'oracle':
        session.execute('select 1 from dual')
    else:
        session.execute('select 1')
    session.close()
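# A minimal sketch (hypothetical helper, not part of the test suite) reusing the
# same dialect check outside of tests: Oracle rejects a bare 'select 1' because
# every SELECT needs a FROM clause there, hence the DUAL table.
def db_ping(session):
    """Run a trivial liveness query appropriate for the connected backend."""
    if session.bind.dialect.name == 'oracle':
        session.execute('select 1 from dual')
    else:
        session.execute('select 1')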
def test_repair_a_rule_with_missing_locks(self):
    """ JUDGE EVALUATOR: Test the judge when a rule gets STUCK during re-evaluation and locks are missing"""
    scope = 'mock'
    files = create_files(3, scope, self.rse4)
    dataset = 'dataset_' + str(uuid())
    add_did(scope, dataset, DIDType.from_sym('DATASET'), 'jdoe')

    # Add a first rule to the DS
    rule_id = add_rule(dids=[{'scope': scope, 'name': dataset}], account='jdoe', copies=2, rse_expression=self.T1, grouping='DATASET', weight=None, lifetime=None, locked=False, subscription_id=None)[0]

    attach_dids(scope, dataset, files, 'jdoe')

    # Fake judge
    re_evaluator(once=True)

    # Check if the locks are created properly
    for file in files:
        assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)

    # Add more files to the DID
    files2 = create_files(3, scope, self.rse4)
    attach_dids(scope, dataset, files2, 'jdoe')

    # Mark the rule STUCK to fake that the re-evaluation failed
    session = get_session()
    rule = session.query(models.ReplicationRule).filter_by(id=rule_id).one()
    rule.state = RuleState.STUCK
    session.commit()

    rule_repairer(once=True)

    for file in files:
        assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)
    for file in files2:
        assert(len(get_replica_locks(scope=file['scope'], name=file['name'])) == 2)
        # DATASET grouping: the new files must be locked on the same two RSEs as the original files
        assert(len(set([lock.rse_id for lock in get_replica_locks(scope=files[0]['scope'], name=files[0]['name'])]).intersection(set([lock.rse_id for lock in get_replica_locks(scope=file['scope'], name=file['name'])]))) == 2)
    # 6 files with 2 copies each -> 12 REPLICATING locks
    assert(12 == get_rule(rule_id)['locks_replicating_cnt'])
#!/usr/bin/env python
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Vincent Garonne, <*****@*****.**>, 2014

from rucio.db import session, models
from rucio.db.constants import IdentityType

if __name__ == '__main__':
    up_id = 'ami'
    up_pwd = 'b009bb87d4be2c6e366d07bdf5e16eab772bd5e773f9b4b51df9e8b169605943'
    up_email = '*****@*****.**'
    account = 'atagcol'

    s = session.get_session()
    identity1 = models.Identity(identity=up_id, identity_type=IdentityType.USERPASS, password=up_pwd, salt='0', email=up_email)
    iaa1 = models.IdentityAccountAssociation(identity=identity1.identity, identity_type=identity1.identity_type, account=account, is_default=False)

    # Apply
    # s.add_all([identity1])
    # s.commit()
    s.add_all([iaa1])
    s.commit()
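    # Hedged sanity check (a sketch, not in the original script): read the
    # mapping back with the same session to confirm the commit succeeded.
    iaa_check = s.query(models.IdentityAccountAssociation).filter_by(identity=up_id, identity_type=IdentityType.USERPASS, account=account).one()
    print 'identity %s is mapped to account %s' % (iaa_check.identity, iaa_check.account)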
def __init__(self, broker):
    self.__broker = broker
    self.__session = get_session()
argparser.add_argument('--num', '-n', help="Scan a fixed number of available rules", type=int)

if len(sys.argv) == 1:
    argparser.print_help()
    sys.exit(-1)

args = argparser.parse_args()

# if args.all:
#     print 'all'
# elif args.fraction is not None:
#     print 'fraction'
# elif args.num is not None:
#     print 'num'

session = get_session()
total_cnt = session.query(models.ReplicationRule).count()
print "There are currently %d replication rules registered in Rucio" % total_cnt

if session.bind.dialect.name != 'sqlite':
    query = session.query(models.ReplicationRule).order_by('dbms_random.value')
else:
    query = session.query(models.ReplicationRule).order_by('RANDOM()')

if args.fraction is not None:
    # Validate the fraction before using it
    if args.fraction > 1 or args.fraction <= 0:
        raise ValueError('The fraction value must be between 0 and 1')
    print 'Reading up to %d rules (fraction=%f)' % (int(total_cnt * args.fraction), args.fraction)
    query = query.limit(int(total_cnt * args.fraction))
elif args.num is not None:
    # Assumed completion of the truncated branch: cap the scan at the requested number of rules
    print 'Reading up to %d rules' % args.num
    query = query.limit(args.num)
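# Worked example (illustrative numbers, not from the source): with
# total_cnt = 1000, '--fraction 0.1' limits the randomly ordered query to
# int(1000 * 0.1) = 100 rules, while '--num 50' would cap it at exactly 50.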
def migrate(total_workers, worker_number):
    print 'Worker %(worker_number)s/%(total_workers)s' % locals()
    session = get_session()
    try:
        query = '''
DECLARE
    TYPE t_string IS TABLE OF VARCHAR2(255) INDEX BY PLS_INTEGER;
    TYPE t_dates IS TABLE OF TIMESTAMP INDEX BY PLS_INTEGER;
    TYPE t_numbers IS TABLE OF NUMBER INDEX BY PLS_INTEGER;
    TYPE t_uuids IS TABLE OF RAW(16) INDEX BY PLS_INTEGER;
    TYPE t_paths IS TABLE OF VARCHAR2(1024) INDEX BY PLS_INTEGER;

    scopes t_string;
    names t_string;
    guids t_uuids;
    rseids t_uuids;
    filesizes t_numbers;
    filebytes t_numbers;
    adler32s t_string;
    md5s t_string;
    dates t_dates;
    paths t_paths;

    CURSOR f_cur IS
        WITH r as (select /*+ INDEX(TMP_REPLICAS TMP_REPLICAS_IDX) */ name, rse_id, path
                   from atlas_rucio.tmp_replicas
                   where is_migrated is not null and ORA_HASH(name, %s) = %s)
        select rse_id, f.scope, name, hextoraw(replace(guid, '-', '')), filesize,
               case when INSTR(checksum,'ad:', 1) >0 then replace(checksum, 'ad:', '') end,
               case when INSTR(checksum,'md5:', 1) >0 then replace(checksum, 'md5:', '') end,
               creationdate, path
        from atlas_dq2.t_10_files f, r
        where f.lfn=r.name and f.scope is not null
          and exists (select 1 from atlas_rucio.scopes s where s.scope=f.scope); -- _VIEW@ADCR_ADG.CERN.CH
BEGIN
    OPEN f_cur;
    LOOP
        FETCH f_cur BULK COLLECT INTO rseids, scopes, names, guids, filesizes, adler32s, md5s, dates, paths LIMIT 5000;

        FORALL i IN 1 .. scopes.count
            MERGE INTO atlas_rucio.DIDS D
            USING DUAL
            ON (D.scope = scopes(i) AND D.name = names(i))
            WHEN NOT MATCHED THEN
                INSERT (SCOPE, NAME, ACCOUNT, DID_TYPE, IS_OPEN, MONOTONIC, HIDDEN, OBSOLETE, COMPLETE, IS_NEW,
                        AVAILABILITY, SUPPRESSED, BYTES, LENGTH, MD5, ADLER32, EXPIRED_AT, DELETED_AT, EVENTS,
                        GUID, PROJECT, DATATYPE, RUN_NUMBER, STREAM_NAME, PROD_STEP, VERSION, CAMPAIGN,
                        UPDATED_AT, CREATED_AT)
                VALUES (scopes(i), names(i), 'root', 'F', 0, 0, 0, 0, 0, NULL, 'A', 0, filesizes(i), 1, md5s(i),
                        adler32s(i), '', '', Null, guids(i), '', '', Null, '', '', '', '', dates(i), dates(i));

        FORALL i IN 1 .. names.COUNT
            INSERT INTO atlas_rucio.REPLICAS F (SCOPE, NAME, RSE_ID, BYTES, MD5, ADLER32, STATE, LOCK_CNT,
                                                ACCESSED_AT, TOMBSTONE, PATH, UPDATED_AT, CREATED_AT)
            VALUES (scopes(i), names(i), rseids(i), filesizes(i), md5s(i), adler32s(i), 'A', 0, '', '', paths(i), dates(i), dates(i));

        FORALL i IN 1 .. names.COUNT
            UPDATE atlas_rucio.tmp_replicas
            SET IS_MIGRATED = NULL
            WHERE rse_id = rseids(i) and name = names(i);

        FORALL i IN filesizes.first .. filesizes.last
            INSERT INTO atlas_rucio.UPDATED_RSE_COUNTERS (ID, RSE_ID, FILES, BYTES, UPDATED_AT, CREATED_AT)
            VALUES (SYS_GUID(), rseids(i), 1, filesizes(i), sys_extract_utc(systimestamp), sys_extract_utc(systimestamp));

        -- UPDATE atlas_rucio.RSE_COUNTERS
        -- SET files=files+1, bytes=bytes+filesizes(i), updated_at=sysdate
        -- WHERE RSE_ID = rseids(i);

        COMMIT;
        EXIT WHEN f_cur%%NOTFOUND;
    END LOOP;
    CLOSE f_cur;
END;''' % (total_workers, worker_number)

        session.execute(query)
    finally:
        session.close()
    print 'Worker %(worker_number)s/%(total_workers)s done' % locals()
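# A minimal driver sketch (hypothetical, not part of the original module): the
# cursor above filters on ORA_HASH(name, total_workers) = worker_number, and
# ORA_HASH(x, N) yields buckets 0..N inclusive, so N+1 processes partition the
# tmp_replicas table disjointly and can migrate in parallel.
if __name__ == '__main__':
    from multiprocessing import Process

    total_workers = 3  # assumed bucket count: workers take numbers 0..3
    procs = [Process(target=migrate, args=(total_workers, n)) for n in xrange(total_workers + 1)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()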
def request_transfer(once=False, src=None, dst=None):
    """
    Main loop to request a new transfer.
    """

    logging.info('request: starting')

    site_a = 'RSE%s' % generate_uuid().upper()
    site_b = 'RSE%s' % generate_uuid().upper()

    scheme = 'https'
    impl = 'rucio.rse.protocols.webdav.Default'
    if not src.startswith('https://'):
        scheme = 'srm'
        impl = 'rucio.rse.protocols.srm.Default'
        srctoken = src.split(':')[0]
        dsttoken = dst.split(':')[0]

    tmp_proto = {
        'impl': impl,
        'scheme': scheme,
        'domains': {
            'lan': {'read': 1, 'write': 1, 'delete': 1},
            'wan': {'read': 1, 'write': 1, 'delete': 1}}}

    rse.add_rse(site_a)
    tmp_proto['hostname'] = src.split(':')[1][2:]
    tmp_proto['port'] = src.split(':')[2].split('/')[0]
    tmp_proto['prefix'] = '/'.join([''] + src.split(':')[2].split('/')[1:])
    if scheme == 'srm':
        tmp_proto['extended_attributes'] = {'space_token': srctoken,
                                            'web_service_path': ''}
    rse.add_protocol(site_a, tmp_proto)

    tmp_proto = {
        'impl': impl,
        'scheme': scheme,
        'domains': {
            'lan': {'read': 1, 'write': 1, 'delete': 1},
            'wan': {'read': 1, 'write': 1, 'delete': 1}}}

    rse.add_rse(site_b)
    tmp_proto['hostname'] = dst.split(':')[1][2:]
    tmp_proto['port'] = dst.split(':')[2].split('/')[0]
    tmp_proto['prefix'] = '/'.join([''] + dst.split(':')[2].split('/')[1:])
    if scheme == 'srm':
        tmp_proto['extended_attributes'] = {'space_token': dsttoken,
                                            'web_service_path': ''}
    rse.add_protocol(site_b, tmp_proto)

    si = rsemanager.get_rse_info(site_a)

    session = get_session()

    logging.info('request: started')

    while not graceful_stop.is_set():

        try:
            ts = time.time()
            tmp_name = generate_uuid()

            # add a new dataset
            did.add_did(scope='mock', name='dataset-%s' % tmp_name,
                        type=DIDType.DATASET, account='root', session=session)

            # construct PFN
            pfn = rsemanager.lfns2pfns(si, lfns=[{'scope': 'mock', 'name': 'file-%s' % tmp_name}])['mock:file-%s' % tmp_name]

            # create the directories if needed
            p = rsemanager.create_protocol(si, operation='write', scheme=scheme)
            p.connect()
            try:
                p.mkdir(pfn)
            except:
                pass

            # upload the test file
            try:
                fp = os.path.dirname(config_get('injector', 'file'))
                fn = os.path.basename(config_get('injector', 'file'))
                p.put(fn, pfn, source_dir=fp)
            except:
                logging.critical('Could not upload, removing temporary DID: %s' % str(sys.exc_info()))
                did.delete_dids([{'scope': 'mock', 'name': 'dataset-%s' % tmp_name}], account='root', session=session)
                break

            # add the replica
            replica.add_replica(rse=site_a, scope='mock', name='file-%s' % tmp_name,
                                bytes=config_get_int('injector', 'bytes'),
                                adler32=config_get('injector', 'adler32'),
                                md5=config_get('injector', 'md5'),
                                account='root', session=session)

            # to the dataset
            did.attach_dids(scope='mock', name='dataset-%s' % tmp_name,
                            dids=[{'scope': 'mock', 'name': 'file-%s' % tmp_name,
                                   'bytes': config_get('injector', 'bytes')}],
                            account='root', session=session)

            # add rule for the dataset
            ts = time.time()
            rule.add_rule(dids=[{'scope': 'mock', 'name': 'dataset-%s' % tmp_name}],
                          account='root', copies=1, rse_expression=site_b,
                          grouping='ALL', weight=None, lifetime=None, locked=False,
                          subscription_id=None, activity='mock-injector', session=session)
            logging.info('added rule for %s for DID mock:%s' % (site_b, tmp_name))
            record_timer('daemons.mock.conveyorinjector.add_rule', (time.time()-ts)*1000)

            record_counter('daemons.mock.conveyorinjector.request_transfer')

            session.commit()
        except:
            session.rollback()
            logging.critical(traceback.format_exc())

        if once:
            return

    logging.info('request: graceful stop requested')

    logging.info('request: graceful stop done')
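# A sketch of the surrounding daemon boilerplate (assumed, mirroring the usual
# Rucio daemon pattern; not shown in the original excerpt): graceful_stop is a
# threading.Event polled by the loop above and set from a signal handler, so a
# SIGTERM lets the current iteration finish before the daemon exits.
import signal
import threading

graceful_stop = threading.Event()


def stop(signum=None, frame=None):
    """Signal handler: ask request_transfer() to stop after this iteration."""
    graceful_stop.set()

signal.signal(signal.SIGTERM, stop)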