def finisher(once=False, sleep_time=60, activities=None, bulk=100, db_bulk=1000, partition_wait_time=10):
    """
    Main loop to update the replicas and rules based on finished requests.
    """
    try:
        conveyor_config = {item[0]: item[1] for item in items('conveyor')}
    except ConfigNotFound:
        logging.log(logging.INFO, 'No configuration found for conveyor')
        conveyor_config = {}

    # Get suspicious patterns
    suspicious_patterns = conveyor_config.get('suspicious_pattern', [])
    if suspicious_patterns:
        pattern = str(suspicious_patterns)
        patterns = pattern.split(",")
        suspicious_patterns = [re.compile(pat.strip()) for pat in patterns]
    logging.log(logging.DEBUG, "Suspicious patterns: %s", [pat.pattern for pat in suspicious_patterns])

    retry_protocol_mismatches = conveyor_config.get('retry_protocol_mismatches', False)

    logger_prefix = executable = 'conveyor-finisher'
    if activities:
        activities.sort()
        executable += ' --activities ' + str(activities)

    run_conveyor_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable=executable,
        logger_prefix=logger_prefix,
        partition_wait_time=partition_wait_time,
        sleep_time=sleep_time,
        run_once_fnc=functools.partial(
            run_once,
            bulk=bulk,
            db_bulk=db_bulk,
            suspicious_patterns=suspicious_patterns,
            retry_protocol_mismatches=retry_protocol_mismatches,
        ),
        activities=activities,
    )
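
# A minimal, self-contained sketch of the 'suspicious_pattern' handling above:
# the option is a single comma-separated string of regexes that gets compiled
# into a list of patterns. The option value used here is a hypothetical
# example, not one read from an actual rucio.cfg.
def _example_compile_suspicious_patterns():
    import re
    raw = '.*CHECKSUM MISMATCH.*, .*Connection timed out.*'  # hypothetical option value
    suspicious_patterns = [re.compile(pat.strip()) for pat in raw.split(',')]
    # e.g. suspicious_patterns[0].search('TRANSFER CHECKSUM MISMATCH at dst') matches
    return suspicious_patterns
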
def preparer(once, sleep_time, bulk, partition_wait_time=10):
    """
    Main loop to prepare transfer requests for submission.
    """
    # Make an initial heartbeat so that all instances of the daemon have the correct worker number on the next try
    logger_prefix = executable = 'conveyor-preparer'

    run_conveyor_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable=executable,
        logger_prefix=logger_prefix,
        partition_wait_time=partition_wait_time,
        sleep_time=sleep_time,
        run_once_fnc=functools.partial(run_once, bulk=bulk),
        activities=None,
        heart_beat_older_than=None,
    )
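
# Sketch of the functools.partial pattern used throughout this module: the
# per-daemon options (here bulk) are pre-bound, so the shared daemon loop can
# invoke every run_once_fnc the same way while still passing its own keyword
# arguments at call time. run_once_demo and its parameters are hypothetical.
def _example_partial_binding():
    import functools

    def run_once_demo(bulk, heartbeat_handler=None, activity=None):
        return bulk

    run_once_fnc = functools.partial(run_once_demo, bulk=100)
    assert run_once_fnc() == 100                                # bulk already bound
    assert run_once_fnc(activity='User Subscriptions') == 100   # loop adds its own kwargs
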
def poller(once=False, activities=None, sleep_time=60, fts_bulk=100, db_bulk=1000, older_than=60, activity_shares=None, partition_wait_time=10):
    """
    Main loop to check the status of a transfer primitive with a transfertool.
    """
    try:
        timeout = config_get('conveyor', 'poll_timeout')
        timeout = float(timeout)
    except NoOptionError:
        timeout = None

    multi_vo = config_get_bool('common', 'multi_vo', False, None)

    logger_prefix = executable = 'conveyor-poller'
    if activities:
        activities.sort()
        executable += ' --activities ' + str(activities)
    if activity_shares:
        executable += ' --activity_shares ' + str(activity_shares)
    if FILTER_TRANSFERTOOL:
        executable += ' --filter-transfertool ' + FILTER_TRANSFERTOOL

    run_conveyor_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable=executable,
        logger_prefix=logger_prefix,
        partition_wait_time=partition_wait_time,
        sleep_time=sleep_time,
        run_once_fnc=functools.partial(
            run_once,
            fts_bulk=fts_bulk,
            db_bulk=db_bulk,
            older_than=older_than,
            activity_shares=activity_shares,
            multi_vo=multi_vo,
            timeout=timeout,
        ),
        activities=activities,
        heart_beat_older_than=3600,
    )
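
# Self-contained sketch of the optional-config idiom used for poll_timeout
# above, written against the stdlib configparser: read the raw string, coerce
# it, and fall back to None when the option is absent. rucio's config_get wraps
# a similar parser; the section contents here are a hypothetical example.
def _example_optional_float_option():
    import configparser
    parser = configparser.ConfigParser()
    parser.read_string('[conveyor]\n')  # hypothetical config without poll_timeout
    try:
        timeout = float(parser.get('conveyor', 'poll_timeout'))
    except configparser.NoOptionError:
        timeout = None
    return timeout  # -> None when the option is absent
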
def throttler(once=False, sleep_time=600, partition_wait_time=10):
    """
    Main loop to check RSE transfer limits.
    """
    logging.info('Throttler starting')

    logger_prefix = executable = 'conveyor-throttler'

    run_conveyor_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable=executable,
        logger_prefix=logger_prefix,
        partition_wait_time=partition_wait_time,
        sleep_time=sleep_time,
        run_once_fnc=run_once,
        activities=None,
    )
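
# Hypothetical usage sketch: a single throttler pass, as one might trigger it
# from a test or an interactive session rather than as a long-running daemon.
def _example_throttler_single_pass():
    throttler(once=True, sleep_time=600, partition_wait_time=10)
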
def stager(once=False, rses=None, bulk=100, group_bulk=1, group_policy='rule', source_strategy=None, activities=None, sleep_time=600):
    """
    Main loop to submit new stage-in requests to a transfertool.
    """
    try:
        scheme = config_get('conveyor', 'scheme')
    except NoOptionError:
        scheme = None
    try:
        failover_scheme = config_get('conveyor', 'failover_scheme')
    except NoOptionError:
        failover_scheme = None
    try:
        bring_online = config_get_int('conveyor', 'bring_online')
    except NoOptionError:
        bring_online = 43200

    try:
        max_time_in_queue = {}
        timelife_conf = config_get('conveyor', 'max_time_in_queue')
        timelife_confs = timelife_conf.split(",")
        for conf in timelife_confs:
            act, timelife = conf.split(":")
            max_time_in_queue[act.strip()] = int(timelife.strip())
    except NoOptionError:
        max_time_in_queue = {}
    if 'default' not in max_time_in_queue:
        max_time_in_queue['default'] = 168
    logging.debug("Maximum time in queue for different activities: %s", max_time_in_queue)

    logger_prefix = executable = 'conveyor-stager'
    if activities:
        activities.sort()
        executable += ' --activities ' + str(activities)

    if rses:
        rse_ids = [rse['id'] for rse in rses]
    else:
        rse_ids = None

    transfertool_kwargs = {
        FTS3Transfertool: {
            'group_policy': group_policy,
            'group_bulk': group_bulk,
            'source_strategy': source_strategy,
            'max_time_in_queue': max_time_in_queue,
            'bring_online': bring_online,
            'default_lifetime': -1,
        }
    }

    run_conveyor_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable=executable,
        logger_prefix=logger_prefix,
        partition_wait_time=None,
        sleep_time=sleep_time,
        run_once_fnc=functools.partial(
            run_once,
            bulk=bulk,
            group_bulk=group_bulk,
            scheme=scheme,
            failover_scheme=failover_scheme,
            rse_ids=rse_ids,
            transfertool_kwargs=transfertool_kwargs,
        ),
        activities=activities,
        heart_beat_older_than=None,
    )
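
# Self-contained sketch (with hypothetical values) of the 'max_time_in_queue'
# format parsed above: a comma-separated list of '<activity>:<hours>' pairs,
# completed with a default of 168 hours when no 'default' entry is configured.
def _example_parse_max_time_in_queue():
    timelife_conf = 'User Subscriptions:48, Data Rebalancing:24'  # hypothetical option value
    max_time_in_queue = {}
    for conf in timelife_conf.split(','):
        act, timelife = conf.split(':')
        max_time_in_queue[act.strip()] = int(timelife.strip())
    if 'default' not in max_time_in_queue:
        max_time_in_queue['default'] = 168
    # -> {'User Subscriptions': 48, 'Data Rebalancing': 24, 'default': 168}
    return max_time_in_queue
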
def submitter(once=False, rses=None, partition_wait_time=10, bulk=100, group_bulk=1, group_policy='rule',
              source_strategy=None, activities=None, sleep_time=600, max_sources=4, archive_timeout_override=None,
              filter_transfertool=FILTER_TRANSFERTOOL, transfertool=TRANSFER_TOOL, transfertype=TRANSFER_TYPE,
              ignore_availability=False):
    """
    Main loop to submit a new transfer primitive to a transfertool.
    """
    try:
        partition_hash_var = config_get('conveyor', 'partition_hash_var')
    except NoOptionError:
        partition_hash_var = None
    try:
        scheme = config_get('conveyor', 'scheme')
    except NoOptionError:
        scheme = None
    try:
        failover_scheme = config_get('conveyor', 'failover_scheme')
    except NoOptionError:
        failover_scheme = None
    try:
        timeout = config_get('conveyor', 'submit_timeout')
        timeout = float(timeout)
    except NoOptionError:
        timeout = None

    try:
        bring_online = config_get_int('conveyor', 'bring_online')
    except NoOptionError:
        bring_online = 43200

    try:
        max_time_in_queue = {}
        timelife_conf = config_get('conveyor', 'max_time_in_queue')
        timelife_confs = timelife_conf.split(",")
        for conf in timelife_confs:
            act, timelife = conf.split(":")
            max_time_in_queue[act.strip()] = int(timelife.strip())
    except NoOptionError:
        max_time_in_queue = {}

    if 'default' not in max_time_in_queue:
        max_time_in_queue['default'] = 168
    logging.debug("Maximum time in queue for different activities: %s", max_time_in_queue)

    logger_prefix = executable = "conveyor-submitter"
    if activities:
        activities.sort()
        executable += ' --activities ' + str(activities)
    if filter_transfertool:
        executable += ' --filter-transfertool ' + filter_transfertool

    if rses:
        rse_ids = [rse['id'] for rse in rses]
    else:
        rse_ids = None

    transfertool_kwargs = {
        FTS3Transfertool: {
            'group_policy': group_policy,
            'group_bulk': group_bulk,
            'source_strategy': source_strategy,
            'max_time_in_queue': max_time_in_queue,
            'bring_online': bring_online,
            'default_lifetime': 172800,
            'archive_timeout_override': archive_timeout_override,
        },
        GlobusTransferTool: {
            'group_policy': transfertype,
            'group_bulk': group_bulk,
        },
    }

    run_conveyor_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable=executable,
        logger_prefix=logger_prefix,
        partition_wait_time=partition_wait_time,
        sleep_time=sleep_time,
        run_once_fnc=functools.partial(
            run_once,
            bulk=bulk,
            group_bulk=group_bulk,
            filter_transfertool=filter_transfertool,
            transfertool=transfertool,
            ignore_availability=ignore_availability,
            scheme=scheme,
            failover_scheme=failover_scheme,
            partition_hash_var=partition_hash_var,
            rse_ids=rse_ids,
            timeout=timeout,
            transfertool_kwargs=transfertool_kwargs,
        ),
        activities=activities,
    )
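
# Sketch of how a per-transfertool kwargs mapping like the one above can be
# consumed: options are keyed by transfertool class, so the submission path
# picks the entry matching the concrete tool and ignores the rest. This lookup
# helper is an illustrative assumption, not the actual rucio internals.
def _example_select_transfertool_kwargs(transfertool_cls, transfertool_kwargs):
    # Unknown tools simply get no extra options.
    return transfertool_kwargs.get(transfertool_cls, {})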