def run(once=False, group_bulk=1, group_policy='rule', mock=False, rses=None, include_rses=None, exclude_rses=None, vos=None, bulk=100, source_strategy=None, activities=None, exclude_activities=None, sleep_time=600, max_sources=4, retry_other_fts=False, total_threads=1):
    """
    Starts up the conveyor submitter threads.

    :param once: run a single iteration in each submitter thread.
    :param group_bulk: maximum number of requests per submission bulk.
    :param group_policy: request grouping policy (e.g. 'rule').
    :param mock: use mock source replicas instead of real ones.
    :param rses: explicit list of RSEs to work on.
    :param include_rses: RSE expression of additional RSEs to include.
    :param exclude_rses: RSE expression of RSEs to exclude.
    :param vos: VOs to consider; only relevant in multi-VO deployments.
    :param bulk: number of requests fetched per iteration.
    :param source_strategy: source selection strategy override.
    :param activities: explicit list of activities to process.
    :param exclude_activities: activities to exclude from processing.
    :param sleep_time: seconds to sleep between iterations.
    :param max_sources: maximum number of sources per transfer.
    :param retry_other_fts: retry failed submissions on another FTS server.
    :param total_threads: number of submitter threads to start.
    """
    if mock:
        logging.info('mock source replicas: enabled')
    multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
    working_rses = None
    if rses or include_rses or exclude_rses:
        working_rses = get_conveyor_rses(rses, include_rses, exclude_rses, vos)
        logging.info("RSE selection: RSEs: %s, Include: %s, Exclude: %s", rses, include_rses, exclude_rses)
    elif multi_vo:
        # In multi-VO mode the automatic selection must still be restricted to the relevant VOs.
        working_rses = get_conveyor_rses(rses, include_rses, exclude_rses, vos)
        logging.info("RSE selection: automatic for relevant VOs")
    else:
        logging.info("RSE selection: automatic")
    logging.info('starting submitter threads')

    if exclude_activities:
        if not activities:
            if not multi_vo:
                vos = ['def']
            # The activity list can only be derived from the schema for exactly one VO.
            if vos and len(vos) == 1:
                activities = get_schema_value('ACTIVITY', vos[0])
            elif vos and len(vos) > 1:
                logging.warning('Cannot get activity list from schema when multiple VOs given, either provide `activities` argument or run on a single VO')
                activities = [None]
            else:
                logging.warning('Cannot get activity list from schema when no VO given, either provide `activities` argument or `vos` with a single entry')
                activities = [None]
        # Build a new list instead of mutating a caller-supplied `activities` in place.
        activities = [activity for activity in activities if activity not in exclude_activities]

    threads = [threading.Thread(target=submitter,
                                kwargs={'once': once,
                                        'rses': working_rses,
                                        'bulk': bulk,
                                        'group_bulk': group_bulk,
                                        'group_policy': group_policy,
                                        'activities': activities,
                                        'mock': mock,
                                        'sleep_time': sleep_time,
                                        'max_sources': max_sources,
                                        'source_strategy': source_strategy,
                                        'retry_other_fts': retry_other_fts})
               for _ in range(0, total_threads)]
    for thread in threads:
        thread.start()
    logging.info('waiting for interrupts')
    # Interruptible joins require a timeout; keep joining until all threads have finished.
    # (The previous version collected `join()`'s return value — always None — so the loop
    # exited after a single pass instead of waiting. Also, `isAlive()` was removed in
    # Python 3.9; the method is `is_alive()`.)
    while threads:
        threads = [thread for thread in threads if thread.is_alive()]
        for thread in threads:
            thread.join(timeout=3.14)
def run(once=False, total_threads=1, group_bulk=1, group_policy='rule', rses=None, include_rses=None, exclude_rses=None, vos=None, bulk=100, source_strategy=None, activities=None, sleep_time=600):
    """
    Starts up the conveyor stager threads.

    :param once: run a single stager iteration only.
    :param total_threads: number of stager threads to start.
    :param group_bulk: maximum number of requests per submission bulk.
    :param group_policy: request grouping policy (e.g. 'rule').
    :param rses: explicit list of RSEs to work on.
    :param include_rses: RSE expression of additional RSEs to include.
    :param exclude_rses: RSE expression of RSEs to exclude.
    :param vos: VOs to consider; only relevant in multi-VO deployments.
    :param bulk: number of requests fetched per iteration.
    :param source_strategy: source selection strategy override.
    :param activities: activities to process; an empty list (default) means all.
    :param sleep_time: seconds to sleep between iterations.
    :raises DatabaseException: if the database schema is outdated.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')

    if activities is None:
        # Avoid a mutable default argument; fresh empty list per call.
        activities = []

    multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
    working_rses = None
    if rses or include_rses or exclude_rses:
        working_rses = get_conveyor_rses(rses, include_rses, exclude_rses, vos)
        # Lazy %-args instead of eager string interpolation.
        logging.info("RSE selection: RSEs: %s, Include: %s, Exclude: %s", rses, include_rses, exclude_rses)
    elif multi_vo:
        working_rses = get_conveyor_rses(rses, include_rses, exclude_rses, vos)
        logging.info("RSE selection: automatic for relevant VOs")
    else:
        logging.info("RSE selection: automatic")

    if once:
        logging.info('executing one stager iteration only')
        stager(once,
               rses=working_rses,
               bulk=bulk,
               group_bulk=group_bulk,
               group_policy=group_policy,
               source_strategy=source_strategy,
               activities=activities)
    else:
        logging.info('starting stager threads')
        threads = [threading.Thread(target=stager,
                                    kwargs={'rses': working_rses,
                                            'bulk': bulk,
                                            'group_bulk': group_bulk,
                                            'group_policy': group_policy,
                                            'activities': activities,
                                            'sleep_time': sleep_time,
                                            'source_strategy': source_strategy})
                   for _ in range(0, total_threads)]
        for thread in threads:
            thread.start()
        logging.info('waiting for interrupts')
        # Interruptible joins require a timeout; keep joining until all threads finish.
        # (The previous loop collected `join()`'s None return value and therefore
        # exited after one pass instead of waiting.)
        while threads:
            threads = [thread for thread in threads if thread.is_alive()]
            for thread in threads:
                thread.join(timeout=3.14)
def run(once=False, process=0, total_processes=1, total_threads=1, group_bulk=1, group_policy='rule', mock=False, rses=None, include_rses=None, exclude_rses=None, bulk=100, fts_source_strategy='auto', activities=None, sleep_time=600, max_sources=4, retry_other_fts=False):
    """
    Starts up the conveyor submitter thread.

    :param once: run a single submitter iteration only.
    :param process: index of this process (forwarded to the submitter).
    :param total_processes: total number of processes (forwarded to the submitter).
    :param total_threads: total number of threads (forwarded to the submitter,
                          which manages its own threading — only one Thread is
                          started here by design).
    :param group_bulk: maximum number of requests per submission bulk.
    :param group_policy: request grouping policy (e.g. 'rule').
    :param mock: use mock source replicas instead of real ones.
    :param rses: explicit list of RSEs to work on.
    :param include_rses: RSE expression of additional RSEs to include.
    :param exclude_rses: RSE expression of RSEs to exclude.
    :param bulk: number of requests fetched per iteration.
    :param fts_source_strategy: FTS source selection strategy.
    :param activities: activities to process.
    :param sleep_time: seconds to sleep between iterations.
    :param max_sources: maximum number of sources per transfer.
    :param retry_other_fts: retry failed submissions on another FTS server.
    """
    if mock:
        logging.info('mock source replicas: enabled')
    # `rses=None` replaces the mutable default `rses=[]`; both are falsy,
    # so the RSE selection below behaves identically.
    working_rses = None
    if rses or include_rses or exclude_rses:
        working_rses = get_conveyor_rses(rses, include_rses, exclude_rses)
        # Lazy %-args instead of eager string interpolation.
        logging.info("RSE selection: RSEs: %s, Include: %s, Exclude: %s", rses, include_rses, exclude_rses)
    else:
        logging.info("RSE selection: automatic")

    logging.info('starting submitter threads')
    threads = [
        threading.Thread(target=submitter, kwargs={
            'once': once,
            'process': process,
            'total_processes': total_processes,
            'total_threads': total_threads,
            'rses': working_rses,
            'bulk': bulk,
            'group_bulk': group_bulk,
            'group_policy': group_policy,
            'activities': activities,
            'mock': mock,
            'sleep_time': sleep_time,
            'max_sources': max_sources,
            'fts_source_strategy': fts_source_strategy,
            'retry_other_fts': retry_other_fts
        })
    ]
    for t in threads:
        t.start()
    logging.info('waiting for interrupts')
    # Interruptible joins require a timeout; keep joining until all threads finish.
    # (`isAlive()` was removed in Python 3.9 — the method is `is_alive()` — and the
    # previous loop collected `join()`'s None return value, exiting after one pass.)
    while threads:
        threads = [t for t in threads if t.is_alive()]
        for t in threads:
            t.join(timeout=3.14)
def run(once=False, total_threads=1, group_bulk=1, group_policy='rule', mock=False, rses=None, include_rses=None, exclude_rses=None, bulk=100, source_strategy=None, activities=None, sleep_time=600, retry_other_fts=False):
    """
    Starts up the conveyor stager threads.

    :param once: run a single stager iteration only.
    :param total_threads: number of stager threads to start.
    :param group_bulk: maximum number of requests per submission bulk.
    :param group_policy: request grouping policy (e.g. 'rule').
    :param mock: use mock source replicas instead of real ones.
    :param rses: explicit list of RSEs to work on.
    :param include_rses: RSE expression of additional RSEs to include.
    :param exclude_rses: RSE expression of RSEs to exclude.
    :param bulk: number of requests fetched per iteration.
    :param source_strategy: source selection strategy override.
    :param activities: activities to process; an empty list (default) means all.
    :param sleep_time: seconds to sleep between iterations.
    :param retry_other_fts: retry failed submissions on another FTS server.
    """
    if mock:
        logging.info('mock source replicas: enabled')

    if activities is None:
        # Avoid a mutable default argument; fresh empty list per call.
        activities = []

    working_rses = None
    if rses or include_rses or exclude_rses:
        working_rses = get_conveyor_rses(rses, include_rses, exclude_rses)
        # Lazy %-args instead of eager string interpolation.
        logging.info("RSE selection: RSEs: %s, Include: %s, Exclude: %s", rses, include_rses, exclude_rses)
    else:
        logging.info("RSE selection: automatic")

    if once:
        logging.info('executing one stager iteration only')
        stager(once,
               rses=working_rses,
               mock=mock,
               bulk=bulk,
               group_bulk=group_bulk,
               group_policy=group_policy,
               source_strategy=source_strategy,
               activities=activities,
               retry_other_fts=retry_other_fts)
    else:
        logging.info('starting stager threads')
        threads = [threading.Thread(target=stager,
                                    kwargs={'rses': working_rses,
                                            'bulk': bulk,
                                            'group_bulk': group_bulk,
                                            'group_policy': group_policy,
                                            'activities': activities,
                                            'mock': mock,
                                            'sleep_time': sleep_time,
                                            'source_strategy': source_strategy,
                                            'retry_other_fts': retry_other_fts})
                   for _ in range(0, total_threads)]
        for thread in threads:
            thread.start()
        logging.info('waiting for interrupts')
        # Interruptible joins require a timeout; keep joining until all threads finish.
        # (`isAlive()` was removed in Python 3.9 — the method is `is_alive()` — and the
        # previous loop collected `join()`'s None return value, exiting after one pass.)
        while threads:
            threads = [thread for thread in threads if thread.is_alive()]
            for thread in threads:
                thread.join(timeout=3.14)
def run(once=False, group_bulk=1, group_policy='rule', mock=False, rses=None, include_rses=None, exclude_rses=None, bulk=100, source_strategy=None, activities=None, exclude_activities=None, sleep_time=600, max_sources=4, retry_other_fts=False, total_threads=1):
    """
    Starts up the conveyor submitter threads.

    :param once: run a single iteration in each submitter thread.
    :param group_bulk: maximum number of requests per submission bulk.
    :param group_policy: request grouping policy (e.g. 'rule').
    :param mock: use mock source replicas instead of real ones.
    :param rses: explicit list of RSEs to work on.
    :param include_rses: RSE expression of additional RSEs to include.
    :param exclude_rses: RSE expression of RSEs to exclude.
    :param bulk: number of requests fetched per iteration.
    :param source_strategy: source selection strategy override.
    :param activities: explicit list of activities to process.
    :param exclude_activities: activities to exclude from processing.
    :param sleep_time: seconds to sleep between iterations.
    :param max_sources: maximum number of sources per transfer.
    :param retry_other_fts: retry failed submissions on another FTS server.
    :param total_threads: number of submitter threads to start.
    """
    if mock:
        logging.info('mock source replicas: enabled')
    working_rses = None
    if rses or include_rses or exclude_rses:
        working_rses = get_conveyor_rses(rses, include_rses, exclude_rses)
        logging.info("RSE selection: RSEs: %s, Include: %s, Exclude: %s", rses, include_rses, exclude_rses)
    else:
        logging.info("RSE selection: automatic")
    logging.info('starting submitter threads')

    if exclude_activities:
        if not activities:
            # Fall back to the full activity list declared in the schema.
            activities = get_schema_value('ACTIVITY')
        # Build a new list instead of mutating a caller-supplied `activities` in place.
        activities = [activity for activity in activities if activity not in exclude_activities]

    threads = [threading.Thread(target=submitter,
                                kwargs={'once': once,
                                        'rses': working_rses,
                                        'bulk': bulk,
                                        'group_bulk': group_bulk,
                                        'group_policy': group_policy,
                                        'activities': activities,
                                        'mock': mock,
                                        'sleep_time': sleep_time,
                                        'max_sources': max_sources,
                                        'source_strategy': source_strategy,
                                        'retry_other_fts': retry_other_fts})
               for _ in range(0, total_threads)]
    for thread in threads:
        thread.start()
    logging.info('waiting for interrupts')
    # Interruptible joins require a timeout; keep joining until all threads finish.
    # (`isAlive()` was removed in Python 3.9 — the method is `is_alive()` — and the
    # previous loop collected `join()`'s None return value, exiting after one pass.)
    while threads:
        threads = [thread for thread in threads if thread.is_alive()]
        for thread in threads:
            thread.join(timeout=3.14)