def run(once=False, threads=1, sleep_time=30, did_limit=100):
    """
    Starts up the Judge-Eval threads.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise DatabaseException('Database was not updated, daemon won\'t start')

    executable = 'judge-evaluator'
    hostname = socket.gethostname()
    sanity_check(executable=executable, hostname=hostname)

    if once:
        re_evaluator(once=once, did_limit=did_limit)
    else:
        logging.info('Evaluator starting %s threads' % str(threads))
        threads = [threading.Thread(target=re_evaluator,
                                    kwargs={'once': once,
                                            'sleep_time': sleep_time,
                                            'did_limit': did_limit}) for i in range(0, threads)]
        [t.start() for t in threads]
        # Interruptible joins require a timeout.
        while threads[0].is_alive():
            [t.join(timeout=3.14) for t in threads]
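# The "interruptible joins require a timeout" loop above recurs in every run() below.
# Joining with a finite timeout lets the main thread return to the interpreter
# periodically, so KeyboardInterrupt and signal handlers get a chance to run instead of
# the process blocking indefinitely inside Thread.join(). A minimal sketch of how that
# pattern could be factored into a shared helper is shown here; `join_interruptibly` is
# a hypothetical name used for illustration only, not part of the Rucio code base.
def join_interruptibly(thread_list, timeout=3.14):
    """Join all threads while staying responsive to interrupts (illustrative sketch)."""
    while any(t.is_alive() for t in thread_list):
        for t in thread_list:
            # A short timeout keeps the main thread responsive to signals.
            t.join(timeout=timeout)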
def run(once=False, threads=1): """ Starts up the Abacus-Collection-Replica threads. """ setup_logging() if rucio.db.sqla.util.is_old_db(): raise exception.DatabaseException( 'Database was not updated, daemon won\'t start') executable = 'abacus-collection-replica' hostname = socket.gethostname() sanity_check(executable=executable, hostname=hostname) if once: logging.info('main: executing one iteration only') collection_replica_update(once) else: logging.info('main: starting threads') threads = [ threading.Thread(target=collection_replica_update, kwargs={'once': once}) for i in range(0, threads) ] [t.start() for t in threads] logging.info('main: waiting for interrupts') # Interruptible joins require a timeout. while threads[0].is_alive(): [t.join(timeout=3.14) for t in threads]
def run(threads=1, bulk=100, once=False, sleep_time=60): """ Starts up the minos threads. """ setup_logging() if rucio.db.sqla.util.is_old_db(): raise exception.DatabaseException( 'Database was not updated, daemon won\'t start') if once: logging.log(logging.INFO, 'Will run only one iteration in a single threaded mode') minos_tu_expiration(bulk=bulk, once=once) else: logging.log(logging.INFO, 'Starting Minos Temporary Expiration threads') thread_list = [ threading.Thread(target=minos_tu_expiration, kwargs={ 'once': once, 'sleep_time': sleep_time, 'bulk': bulk }) for _ in range(0, threads) ] [thread.start() for thread in thread_list] logging.log(logging.INFO, 'Waiting for interrupts') # Interruptible joins require a timeout. while thread_list: thread_list = [ thread.join(timeout=3.14) for thread in thread_list if thread and thread.is_alive() ]
def run(threads: int = 1, bulk: int = 100, once: bool = False, sleep_time: int = 60) -> None:
    """
    Starts up the transmogrifier threads.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise DatabaseException("Database was not updated, daemon won't start")

    if once:
        logging.info("Will run only one iteration in a single threaded mode")
        transmogrifier(bulk=bulk, once=once)
    else:
        logging.info("starting transmogrifier threads")
        thread_list = [
            threading.Thread(
                target=transmogrifier,
                kwargs={"once": once, "sleep_time": sleep_time, "bulk": bulk},
            )
            for _ in range(0, threads)
        ]
        [thread.start() for thread in thread_list]
        logging.info("waiting for interrupts")
        # Interruptible joins require a timeout.
        while thread_list:
            thread_list = [
                thread.join(timeout=3.14)
                for thread in thread_list
                if thread and thread.is_alive()
            ]
def run(num_thread=1):
    """
    Starts up the rucio cache consumer thread
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')

    logging.info('starting consumer thread')
    threads = [threading.Thread(target=consumer, kwargs={'id': i,
                                                         'num_thread': num_thread}) for i in range(0, num_thread)]
    [t.start() for t in threads]

    logging.info('waiting for interrupts')
    # Interruptible joins require a timeout.
    # Thread.isAlive() was removed in Python 3.9; use is_alive() instead.
    while threads[0].is_alive():
        [t.join(timeout=3.14) for t in threads]
def run(total_workers=1, once=False, inputfile=None, sleep_time=-1):
    """
    Starts up the automatix threads.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')

    try:
        sites = [s.strip() for s in config_get('automatix', 'sites').split(',')]
    except (NoOptionError, NoSectionError, RuntimeError):
        raise Exception('Could not load sites from configuration')
    if not inputfile:
        inputfile = '/opt/rucio/etc/automatix.json'
    if sleep_time == -1:
        try:
            sleep_time = config_get('automatix', 'sleep_time')
        except (NoOptionError, NoSectionError, RuntimeError):
            sleep_time = 30
    try:
        account = config_get('automatix', 'account')
    except (NoOptionError, NoSectionError, RuntimeError):
        account = 'root'
    try:
        dataset_lifetime = config_get('automatix', 'dataset_lifetime')
    except (NoOptionError, NoSectionError, RuntimeError):
        dataset_lifetime = None
    try:
        set_metadata = config_get('automatix', 'set_metadata')
    except (NoOptionError, NoSectionError, RuntimeError):
        set_metadata = False
    try:
        scope = config_get('automatix', 'scope')
        client = Client()
        filters = {'scope': InternalScope('*', vo=client.vo)}
        if InternalScope(scope, vo=client.vo) not in list_scopes(filter_=filters):
            logging.log(logging.ERROR, 'Scope %s does not exist. Exiting', scope)
            GRACEFUL_STOP.set()
    except Exception:
        scope = False

    threads = list()
    for worker_number in range(0, total_workers):
        kwargs = {'worker_number': worker_number,
                  'total_workers': total_workers,
                  'once': once,
                  'sites': sites,
                  'sleep_time': sleep_time,
                  'account': account,
                  'inputfile': inputfile,
                  'set_metadata': set_metadata,
                  'scope': scope,
                  'dataset_lifetime': dataset_lifetime}
        threads.append(threading.Thread(target=automatix, kwargs=kwargs))
    [thread.start() for thread in threads]
    while threads[0].is_alive():
        logging.log(logging.DEBUG, 'Still %i active threads', len(threads))
        [thread.join(timeout=3.14) for thread in threads]
def run(once=False, younger_than=3, nattempts=10, vos=None, limit_suspicious_files_on_rse=5):
    """
    Starts up the Suspicious-Replica-Recoverer threads.
    """
    setup_logging()
    logger = formatted_logger(logging.log)

    if rucio.db.sqla.util.is_old_db():
        raise DatabaseException('Database was not updated, daemon won\'t start')

    client_time, db_time = datetime.utcnow(), get_db_time()
    max_offset = timedelta(hours=1, seconds=10)
    if isinstance(db_time, datetime):
        if db_time - client_time > max_offset or client_time - db_time > max_offset:
            logger(logging.CRITICAL, 'Offset between client and db time too big. Stopping Suspicious-Replica-Recoverer.')
            return

    sanity_check(executable='rucio-replica-recoverer', hostname=socket.gethostname())

    if once:
        declare_suspicious_replicas_bad(once, younger_than, nattempts, vos, limit_suspicious_files_on_rse)
    else:
        logger(logging.INFO, 'Suspicious file replicas recovery starting 1 worker.')
        t = threading.Thread(target=declare_suspicious_replicas_bad,
                             kwargs={'once': once,
                                     'younger_than': younger_than,
                                     'nattempts': nattempts,
                                     'vos': vos,
                                     'limit_suspicious_files_on_rse': limit_suspicious_files_on_rse})
        t.start()
        logger(logging.INFO, 'Waiting for interrupts')
        # Interruptible joins require a timeout.
        while t.is_alive():
            t.join(timeout=3.14)
def run(once=False, threads=1, sleep_time=60):
    """
    Starts up the Judge-Injector threads.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise DatabaseException('Database was not updated, daemon won\'t start')

    if once:
        rule_injector(once)
    else:
        logging.info('Injector starting %s threads' % str(threads))
        threads = [threading.Thread(target=rule_injector,
                                    kwargs={'once': once,
                                            'sleep_time': sleep_time}) for i in range(0, threads)]
        [t.start() for t in threads]
        # Interruptible joins require a timeout.
        while threads[0].is_alive():
            [t.join(timeout=3.14) for t in threads]
def run(once=False, sleep_time=600):
    """
    Starts up the conveyor throttler thread.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')

    if once:
        logging.info('running throttler one iteration only')
        throttler(once=True, sleep_time=sleep_time)
    else:
        threads = []
        logging.info('starting throttler thread')
        throttler_thread = threading.Thread(target=throttler,
                                            kwargs={'once': once,
                                                    'sleep_time': sleep_time})
        threads.append(throttler_thread)
        [thread.start() for thread in threads]
        logging.info('waiting for interrupts')
        # Interruptible joins require a timeout.
        while threads:
            threads = [thread.join(timeout=3.14) for thread in threads
                       if thread and thread.is_alive()]
def run(once=False, threads=1, sleep_time=10, bulk=100):
    """
    Running the preparer daemon either once or by default in a loop until stop is called.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')

    def preparer_kwargs():
        # not sure if this is needed for threading.Thread, but it always returns a fresh dictionary
        return {'once': once, 'sleep_time': sleep_time, 'bulk': bulk}

    threads = [threading.Thread(target=preparer, name=f'conveyor-preparer-{i}', kwargs=preparer_kwargs(), daemon=True) for i in range(threads)]
    for thr in threads:
        thr.start()

    all_running = True
    while all_running:
        for thr in threads:
            thr.join(timeout=3.14)
            if not thr.is_alive() or graceful_stop.is_set():
                all_running = False
                break

    if graceful_stop.is_set() or once:
        logging.info('conveyor-preparer: gracefully stopping')
    else:
        logging.warning('conveyor-preparer: stopping out of the ordinary')
        graceful_stop.set()

    for thr in threads:
        thr.join(timeout=3.14)

    logging.info('conveyor-preparer: stopped')
def run(once=False, threads=1): """ Starts up the Judge-Repairer threads. """ setup_logging() if rucio.db.sqla.util.is_old_db(): raise exception.DatabaseException( 'Database was not updated, daemon won\'t start') executable = 'judge-repairer' hostname = socket.gethostname() sanity_check(executable=executable, hostname=hostname) if once: rule_repairer(once) else: logging.info('Repairer starting %s threads' % str(threads)) threads = [ threading.Thread(target=rule_repairer, kwargs={'once': once}) for i in range(0, threads) ] [t.start() for t in threads] # Interruptible joins require a timeout. while threads[0].is_alive(): [t.join(timeout=3.14) for t in threads]
def run(once=False, threads=1): """ Starts up the follower threads """ setup_logging() if rucio.db.sqla.util.is_old_db(): raise exception.DatabaseException( 'Database was not updated, daemon won\'t start') hostname = socket.gethostname() sanity_check(executable='rucio-follower', hostname=hostname) if once: logging.info("executing one follower iteration only") aggregate_events(once) else: logging.info("starting follower threads") # Run the follower daemon thrice a day threads = [ get_thread_with_periodic_running_function(28800, aggregate_events, graceful_stop) for i in range(threads) ] [t.start() for t in threads] logging.info("waiting for interrupts") # Interruptible joins require a timeout. while threads[0].is_alive(): [t.join(timeout=3.14) for t in threads]
def run(once=False, total_threads=1, full_mode=False):
    """
    Starts up the receiver thread
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')

    logging.info('starting receiver thread')
    threads = [threading.Thread(target=receiver, kwargs={'id_': i,
                                                         'full_mode': full_mode,
                                                         'total_threads': total_threads}) for i in range(0, total_threads)]
    [thread.start() for thread in threads]

    logging.info('waiting for interrupts')
    # Interruptible joins require a timeout.
    while threads:
        threads = [thread.join(timeout=3.14) for thread in threads
                   if thread and thread.is_alive()]
def run(
    once: bool,
    rse_expression: str,
    move_subscriptions: bool = False,
    use_dump: bool = False,
    sleep_time: int = 300,
    threads: int = 1,
    dry_run: bool = False,
) -> None:
    """
    Starts up the BB8 rebalancing threads.
    """
    setup_logging()

    hostname = socket.gethostname()
    sanity_check(executable="rucio-bb8", hostname=hostname)

    logging.info("BB8 starting %s threads", str(threads))
    threads = [
        threading.Thread(
            target=rule_rebalancer,
            kwargs={
                "once": once,
                "rse_expression": rse_expression,
                "sleep_time": sleep_time,
                "dry_run": dry_run,
            },
        )
        for _ in range(0, threads)
    ]
    [thread.start() for thread in threads]
    # Interruptible joins require a timeout.
    while threads[0].is_alive():
        [thread.join(timeout=3.14) for thread in threads]
def run(once, rse_expression, move_subscriptions=False, use_dump=False, sleep_time=300, threads=1, dry_run=False):
    """
    Starts up the BB8 rebalancing threads.
    """
    setup_logging()

    hostname = socket.gethostname()
    sanity_check(executable='rucio-bb8', hostname=hostname)

    if once:
        rule_rebalancer(rse_expression=rse_expression, move_subscriptions=move_subscriptions, use_dump=use_dump, once=once)
    else:
        logging.info('BB8 starting %s threads', str(threads))
        threads = [threading.Thread(target=rule_rebalancer,
                                    kwargs={'once': once,
                                            'rse_expression': rse_expression,
                                            'sleep_time': sleep_time,
                                            'dry_run': dry_run}) for _ in range(0, threads)]
        [thread.start() for thread in threads]
        # Interruptible joins require a timeout.
        while threads[0].is_alive():
            [thread.join(timeout=3.14) for thread in threads]
def run(once=False, threads=1): """ Starts up the Judge-Clean threads. """ setup_logging() if rucio.db.sqla.util.is_old_db(): raise exception.DatabaseException( 'Database was not updated, daemon won\'t start') client_time, db_time = datetime.utcnow(), get_db_time() max_offset = timedelta(hours=1, seconds=10) if type(db_time) is datetime: if db_time - client_time > max_offset or client_time - db_time > max_offset: logging.critical( 'Offset between client and db time too big. Stopping Cleaner') return executable = 'judge-cleaner' hostname = socket.gethostname() sanity_check(executable=executable, hostname=hostname) if once: rule_cleaner(once) else: logging.info('Cleaner starting %s threads' % str(threads)) threads = [ threading.Thread(target=rule_cleaner, kwargs={'once': once}) for i in range(0, threads) ] [t.start() for t in threads] # Interruptible joins require a timeout. while threads[0].is_alive(): [t.join(timeout=3.14) for t in threads]
def run(threads=1, chunk_size=100, once=False, greedy=False, rses=None, scheme=None,
        exclude_rses=None, include_rses=None, vos=None, delay_seconds=0, sleep_time=60,
        auto_exclude_threshold=100, auto_exclude_timeout=600):
    """
    Starts up the reaper threads.

    :param threads: The total number of workers.
    :param chunk_size: The size of chunk for deletion.
    :param once: If True, only runs one iteration of the main loop.
    :param greedy: If True, delete right away replicas with tombstone.
    :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
    :param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.
    :param exclude_rses: RSE expression to exclude RSEs from the Reaper.
    :param include_rses: RSE expression to include RSEs.
    :param vos: VOs on which to look for RSEs. Only used in multi-VO mode. If None, we either use all VOs if run from "def", or the current VO otherwise.
    :param delay_seconds: The delay to query replicas in BEING_DELETED state.
    :param sleep_time: Time between two cycles.
    :param auto_exclude_threshold: Number of service unavailable exceptions after which the RSE gets temporarily excluded.
    :param auto_exclude_timeout: Timeout for temporarily excluded RSEs.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise DatabaseException('Database was not updated, daemon won\'t start')

    logging.log(logging.INFO, 'main: starting processes')
    rses_to_process = get_rses_to_process(rses, include_rses, exclude_rses, vos)
    if not rses_to_process:
        logging.log(logging.ERROR, 'Reaper: No RSEs found. Exiting.')
        return
    logging.log(logging.INFO, 'Reaper: This instance will work on RSEs: %s', ', '.join([rse['rse'] for rse in rses_to_process]))

    # To populate the cache
    get_rses_to_hostname_mapping()

    logging.log(logging.INFO, 'starting reaper threads')
    threads_list = [threading.Thread(target=reaper, kwargs={'once': once,
                                                            'rses': rses,
                                                            'include_rses': include_rses,
                                                            'exclude_rses': exclude_rses,
                                                            'vos': vos,
                                                            'chunk_size': chunk_size,
                                                            'greedy': greedy,
                                                            'sleep_time': sleep_time,
                                                            'delay_seconds': delay_seconds,
                                                            'scheme': scheme,
                                                            'auto_exclude_threshold': auto_exclude_threshold,
                                                            'auto_exclude_timeout': auto_exclude_timeout}) for _ in range(0, threads)]
    for thread in threads_list:
        thread.start()

    logging.log(logging.INFO, 'waiting for interrupts')
    # Interruptible joins require a timeout.
    while threads_list:
        threads_list = [thread.join(timeout=3.14) for thread in threads_list
                        if thread and thread.is_alive()]
def run(once=False, threads=1, bulk=1000, sleep_time=10, broker_timeout=3):
    '''
    Starts up the hermes2 threads.
    '''
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise DatabaseException('Database was not updated, daemon won\'t start')

    logging.info('starting hermes2 threads')
    thread_list = [threading.Thread(target=hermes2, kwargs={'thread': cnt,
                                                            'once': once,
                                                            'bulk': bulk,
                                                            'sleep_time': sleep_time}) for cnt in range(0, threads)]

    for thrd in thread_list:
        thrd.start()

    logging.debug(thread_list)
    # Interruptible joins require a timeout.
    while thread_list:
        thread_list = [thread.join(timeout=3.14) for thread in thread_list
                       if thread and thread.is_alive()]
def run(once=False, threads=1, sleep_time=10, limit=1000):
    """
    Starts up the Abacus-Collection-Replica threads.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')

    if once:
        logging.info('main: executing one iteration only')
        collection_replica_update(once)
    else:
        logging.info('main: starting threads')
        threads = [threading.Thread(target=collection_replica_update,
                                    kwargs={'once': once,
                                            'sleep_time': sleep_time,
                                            'limit': limit}) for _ in range(0, threads)]
        [t.start() for t in threads]
        logging.info('main: waiting for interrupts')
        # Interruptible joins require a timeout.
        while threads[0].is_alive():
            [t.join(timeout=3.14) for t in threads]
def run(once=False, total_threads=1, sleep_time=60, activities=None, bulk=100, db_bulk=1000):
    """
    Starts up the conveyor threads.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise DatabaseException('Database was not updated, daemon won\'t start')

    if once:
        logging.log(logging.INFO, 'executing one finisher iteration only')
        finisher(once=once, activities=activities, bulk=bulk, db_bulk=db_bulk)
    else:
        logging.log(logging.INFO, 'starting finisher threads')
        threads = [threading.Thread(target=finisher, kwargs={'sleep_time': sleep_time,
                                                             'activities': activities,
                                                             'db_bulk': db_bulk,
                                                             'bulk': bulk}) for _ in range(0, total_threads)]
        [thread.start() for thread in threads]

        logging.log(logging.INFO, 'waiting for interrupts')
        # Interruptible joins require a timeout.
        while threads:
            threads = [thread.join(timeout=3.14) for thread in threads
                       if thread and thread.is_alive()]
def run(once=False, last_nhours=1, external_hosts=None, fts_wait=1800, total_threads=1):
    """
    Starts up the conveyor threads.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')

    if not external_hosts:
        external_hosts = []

    if once:
        logging.info('executing one poller iteration only')
        poller_latest(external_hosts, once=once, last_nhours=last_nhours)
    else:
        logging.info('starting poller threads')
        threads = [threading.Thread(target=poller_latest, kwargs={'external_hosts': external_hosts,
                                                                  'fts_wait': fts_wait,
                                                                  'last_nhours': last_nhours}) for _ in range(0, total_threads)]
        [thread.start() for thread in threads]

        logging.info('waiting for interrupts')
        # Interruptible joins require a timeout.
        # Thread.isAlive() was removed in Python 3.9; use is_alive() instead.
        while threads:
            threads = [thread.join(timeout=3.14) for thread in threads
                       if thread and thread.is_alive()]
def run(once=False, threads=1, fill_history_table=False, sleep_time=10):
    """
    Starts up the Abacus-RSE threads.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')

    executable = 'abacus-rse'
    hostname = socket.gethostname()
    sanity_check(executable=executable, hostname=hostname)

    if once:
        logging.info('main: executing one iteration only')
        rse_update(once)
    else:
        logging.info('main: starting threads')
        threads = [threading.Thread(target=rse_update, kwargs={'once': once,
                                                               'sleep_time': sleep_time}) for i in range(0, threads)]
        if fill_history_table:
            threads.append(get_thread_with_periodic_running_function(3600, fill_rse_counter_history_table, graceful_stop))
        [t.start() for t in threads]
        logging.info('main: waiting for interrupts')
        # Interruptible joins require a timeout.
        while threads[0].is_alive():
            [t.join(timeout=3.14) for t in threads]
def run(threads=1, sleep_time_datasets=60, sleep_time_files=60):
    """
    Starts up the consumer threads
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise DatabaseException('Database was not updated, daemon won\'t start')

    dataset_queue = Queue()
    logging.info('starting tracer consumer threads')

    thread_list = []
    for thread in range(0, threads):
        thread_list.append(Thread(target=kronos_file, kwargs={'thread': thread,
                                                              'sleep_time': sleep_time_files,
                                                              'dataset_queue': dataset_queue}))
        thread_list.append(Thread(target=kronos_dataset, kwargs={'thread': thread,
                                                                 'sleep_time': sleep_time_datasets,
                                                                 'dataset_queue': dataset_queue}))

    [thread.start() for thread in thread_list]

    logging.info('waiting for interrupts')

    while len(thread_list) > 0:
        thread_list = [thread.join(timeout=3) for thread in thread_list
                       if thread and thread.is_alive()]
def run(threads=1, bulk=100, date_check=None, dry_run=True, grace_period=86400,
        once=True, unlock=False, spread_period=0, purge_replicas=False, sleep_time=60):
    """
    Starts up the atropos threads.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')

    if not date_check:
        date_check = datetime.datetime.now()
    else:
        date_check = datetime.datetime.strptime(date_check, '%Y-%m-%d')
    if once:
        logging.info('Will run only one iteration')
    logging.info('starting atropos threads')
    thread_list = [threading.Thread(target=atropos, kwargs={'once': once,
                                                            'thread': i,
                                                            'date_check': date_check,
                                                            'dry_run': dry_run,
                                                            'grace_period': grace_period,
                                                            'bulk': bulk,
                                                            'unlock': unlock,
                                                            'spread_period': spread_period,
                                                            'purge_replicas': purge_replicas,
                                                            'sleep_time': sleep_time}) for i in range(0, threads)]
    [t.start() for t in thread_list]
    logging.info('waiting for interrupts')
    # Interruptible joins require a timeout.
    while thread_list:
        thread_list = [t.join(timeout=3.14) for t in thread_list if t and t.is_alive()]
def run(once=False, threads=1, loop_rate=300, max_rows=100, sleep_time=300):
    """
    Starts up the OAuth Manager threads.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise DatabaseException('Database was not updated, daemon won\'t start')

    sanity_check(executable='OAuthManager', hostname=socket.gethostname())

    if once:
        OAuthManager(once, loop_rate, max_rows, sleep_time)
    else:
        logging.info('OAuth Manager starting %s threads', str(threads))
        threads = [threading.Thread(target=OAuthManager,
                                    kwargs={'once': once,
                                            'loop_rate': int(loop_rate),
                                            'max_rows': max_rows,
                                            'sleep_time': sleep_time}) for i in range(0, threads)]
        [t.start() for t in threads]
        # Interruptible joins require a timeout.
        while threads[0].is_alive():
            [t.join(timeout=3.14) for t in threads]
def run(threads=1, bulk=100, once=False, sleep_time=60): """ Starts up the transmogrifier threads. """ setup_logging() if rucio.db.sqla.util.is_old_db(): raise DatabaseException( 'Database was not updated, daemon won\'t start') if once: logging.info('Will run only one iteration in a single threaded mode') transmogrifier(bulk=bulk, once=once) else: logging.info('starting transmogrifier threads') thread_list = [ threading.Thread(target=transmogrifier, kwargs={ 'once': once, 'sleep_time': sleep_time, 'bulk': bulk }) for _ in range(0, threads) ] [thread.start() for thread in thread_list] logging.info('waiting for interrupts') # Interruptible joins require a timeout. while thread_list: thread_list = [ thread.join(timeout=3.14) for thread in thread_list if thread and thread.is_alive() ]
def run(once=False, total_workers=1, chunk_size=10, sleep_time=60):
    """
    Starts up the undertaker threads.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise DatabaseException('Database was not updated, daemon won\'t start')

    logging.info('main: starting threads')
    threads = [threading.Thread(target=undertaker, kwargs={'worker_number': i,
                                                           'total_workers': total_workers,
                                                           'once': once,
                                                           'chunk_size': chunk_size,
                                                           'sleep_time': sleep_time}) for i in range(0, total_workers)]
    [t.start() for t in threads]
    logging.info('main: waiting for interrupts')
    # Interruptible joins require a timeout.
    while threads[0].is_alive():
        [t.join(timeout=3.14) for t in threads]
def run(once=False, group_bulk=1, group_policy='rule', mock=False,
        rses=None, include_rses=None, exclude_rses=None, vos=None, bulk=100, source_strategy=None,
        activities=None, exclude_activities=None, sleep_time=600, max_sources=4, retry_other_fts=False,
        total_threads=1):
    """
    Starts up the conveyor threads.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')

    multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
    working_rses = None
    if rses or include_rses or exclude_rses:
        working_rses = get_conveyor_rses(rses, include_rses, exclude_rses, vos)
        logging.info("RSE selection: RSEs: %s, Include: %s, Exclude: %s", rses, include_rses, exclude_rses)
    elif multi_vo:
        working_rses = get_conveyor_rses(rses, include_rses, exclude_rses, vos)
        logging.info("RSE selection: automatic for relevant VOs")
    else:
        logging.info("RSE selection: automatic")

    logging.info('starting submitter threads')

    if exclude_activities:
        if not activities:
            if not multi_vo:
                vos = ['def']
            if vos and len(vos) == 1:
                activities = get_schema_value('ACTIVITY', vos[0])
            elif vos and len(vos) > 1:
                logging.warning('Cannot get activity list from schema when multiple VOs given, either provide `activities` argument or run on a single VO')
                activities = [None]
            else:
                logging.warning('Cannot get activity list from schema when no VO given, either provide `activities` argument or `vos` with a single entry')
                activities = [None]

        for activity in exclude_activities:
            if activity in activities:
                activities.remove(activity)

    threads = [threading.Thread(target=submitter, kwargs={'once': once,
                                                          'rses': working_rses,
                                                          'bulk': bulk,
                                                          'group_bulk': group_bulk,
                                                          'group_policy': group_policy,
                                                          'activities': activities,
                                                          'sleep_time': sleep_time,
                                                          'max_sources': max_sources,
                                                          'source_strategy': source_strategy,
                                                          'retry_other_fts': retry_other_fts}) for _ in range(0, total_threads)]
    [thread.start() for thread in threads]

    logging.info('waiting for interrupts')
    # Interruptible joins require a timeout.
    # Thread.isAlive() was removed in Python 3.9; use is_alive() instead.
    while threads:
        threads = [thread.join(timeout=3.14) for thread in threads
                   if thread and thread.is_alive()]
def run(): """ Runs the distribution daemon """ setup_logging() if rucio.db.sqla.util.is_old_db(): raise exception.DatabaseException('Database was not updated, daemon won\'t start') thread = threading.Thread(target=run_distribution, kwargs={}) thread.start() while thread and thread.is_alive(): thread.join(timeout=3.14)
def run(once=False, total_threads=1, group_bulk=1, group_policy='rule',
        rses=None, include_rses=None, exclude_rses=None, vos=None, bulk=100,
        source_strategy=None, activities=[], sleep_time=600):
    """
    Starts up the conveyor threads.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')

    multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
    working_rses = None
    if rses or include_rses or exclude_rses:
        working_rses = get_conveyor_rses(rses, include_rses, exclude_rses, vos)
        logging.info("RSE selection: RSEs: %s, Include: %s, Exclude: %s" % (rses, include_rses, exclude_rses))
    elif multi_vo:
        working_rses = get_conveyor_rses(rses, include_rses, exclude_rses, vos)
        logging.info("RSE selection: automatic for relevant VOs")
    else:
        logging.info("RSE selection: automatic")

    if once:
        logging.info('executing one stager iteration only')
        stager(once, rses=working_rses, bulk=bulk, group_bulk=group_bulk,
               group_policy=group_policy, source_strategy=source_strategy, activities=activities)
    else:
        logging.info('starting stager threads')
        threads = [threading.Thread(target=stager, kwargs={'rses': working_rses,
                                                           'bulk': bulk,
                                                           'group_bulk': group_bulk,
                                                           'group_policy': group_policy,
                                                           'activities': activities,
                                                           'sleep_time': sleep_time,
                                                           'source_strategy': source_strategy}) for _ in range(0, total_threads)]
        [thread.start() for thread in threads]

        logging.info('waiting for interrupts')
        # Interruptible joins require a timeout.
        while threads:
            threads = [thread.join(timeout=3.14) for thread in threads
                       if thread and thread.is_alive()]
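# All of the run() entry points above are driven the same way: a thin command-line
# wrapper calls run(), and shutdown is requested by setting the module-level stop event
# (graceful_stop / GRACEFUL_STOP above) that the worker loops poll. The sketch below
# shows what such a wrapper could look like, assuming a stop_sketch() helper that sets
# the event; the argument parsing and exact signal wiring of the real Rucio bin/ scripts
# may differ, so treat this as an illustration only.
import signal
import sys


def stop_sketch(signum=None, frame=None):
    # Assumed helper: request a graceful shutdown by setting the shared stop event.
    graceful_stop.set()


if __name__ == '__main__':
    # Translate SIGTERM into a graceful stop request, then start the daemon loop.
    signal.signal(signal.SIGTERM, stop_sketch)
    try:
        run(once=False, total_threads=1)
    except KeyboardInterrupt:
        stop_sketch()
        sys.exit(1)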