Example #1
0
def rule_rebalancer(
    rse_expression: str,
    move_subscriptions: bool = False,
    use_dump: bool = False,
    sleep_time: int = 300,
    once: bool = True,
    dry_run: bool = False,
) -> None:
    """
    Create a rule_rebalancer worker

    :param rse_expression: The RSE expression where the rule rebalancing is applied.
    :param move_subscriptions: To allow rebalancing of subscription rules. Not implemented yet.
    :param use_dump: To use dump instead of DB query.
    :param sleep_time: Time between two cycles.
    :param once: Run only once.
    :param dry_run: To run in dry run mode (i.e. rules are not created).
    """
    # Delegate the daemon loop to run_daemon; every per-cycle argument is
    # pre-bound into run_once via functools.partial.
    run_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable="rucio-bb8",
        logger_prefix="rucio-bb8",
        partition_wait_time=1,
        sleep_time=sleep_time,
        run_once_fnc=functools.partial(
            run_once,
            rse_expression=rse_expression,
            move_subscriptions=move_subscriptions,
            use_dump=use_dump,
            dry_run=dry_run,
        ),
    )
Example #2
0
def reaper(rses,
           include_rses,
           exclude_rses,
           vos=None,
           chunk_size=100,
           once=False,
           greedy=False,
           scheme=None,
           delay_seconds=0,
           sleep_time=60,
           auto_exclude_threshold=100,
           auto_exclude_timeout=600):
    """
    Daemon entry point: repeatedly select and delete files.

    :param rses:                   List of RSEs the reaper should work against. If empty, it considers all RSEs.
    :param include_rses:           RSE expression to include RSEs.
    :param exclude_rses:           RSE expression to exclude RSEs from the Reaper.
    :param vos:                    VOs on which to look for RSEs. Only used in multi-VO mode.
                                   If None, we either use all VOs if run from "def", or the current VO otherwise.
    :param chunk_size:             The size of chunk for deletion.
    :param once:                   If True, only runs one iteration of the main loop.
    :param greedy:                 If True, delete right away replicas with tombstone.
    :param scheme:                 Force the reaper to use a particular protocol, e.g., mock.
    :param delay_seconds:          The delay to query replicas in BEING_DELETED state.
    :param sleep_time:             Time between two cycles.
    :param auto_exclude_threshold: Number of service unavailable exceptions after which the RSE gets temporarily excluded.
    :param auto_exclude_timeout:   Timeout for temporarily excluded RSEs.
    """

    executable = 'reaper'

    # OIDC options, read once at startup from the [reaper] config section.
    oidc_account = config_get_bool('reaper', 'oidc_account', False, '')
    oidc_scope = config_get('reaper', 'oidc_scope', False, 'delete')
    oidc_audience = config_get('reaper', 'oidc_audience', False, 'rse')

    # Pre-bind every per-cycle argument; run_daemon drives the actual loop.
    cycle_fnc = functools.partial(
        run_once,
        rses=rses,
        include_rses=include_rses,
        exclude_rses=exclude_rses,
        vos=vos,
        chunk_size=chunk_size,
        greedy=greedy,
        scheme=scheme,
        delay_seconds=delay_seconds,
        auto_exclude_threshold=auto_exclude_threshold,
        auto_exclude_timeout=auto_exclude_timeout,
        oidc_account=oidc_account,
        oidc_scope=oidc_scope,
        oidc_audience=oidc_audience,
    )

    run_daemon(
        once=once,
        graceful_stop=GRACEFUL_STOP,
        executable=executable,
        logger_prefix=executable,
        partition_wait_time=0 if once else 10,
        sleep_time=sleep_time,
        run_once_fnc=cycle_fnc,
    )
Example #3
0
def account_update(once=False, sleep_time=10):
    """
    Run the abacus-account daemon, which checks and updates the Account Counters.

    :param once: If True, execute a single iteration and return.
    :param sleep_time: Seconds between two consecutive cycles.
    """
    daemon_kwargs = {
        'once': once,
        'graceful_stop': graceful_stop,
        'executable': 'abacus-account',
        'logger_prefix': 'account_update',
        'partition_wait_time': 1,
        'sleep_time': sleep_time,
        'run_once_fnc': run_once,
    }
    run_daemon(**daemon_kwargs)
Example #4
0
def preparer(once, sleep_time, bulk, partition_wait_time=10):
    """
    Run the conveyor-preparer daemon loop.

    :param once: If True, execute a single iteration and return.
    :param sleep_time: Seconds between two cycles.
    :param bulk: Bulk size pre-bound into each run_once call.
    :param partition_wait_time: Wait time forwarded to run_daemon.
    """
    daemon_name = 'conveyor-preparer'

    run_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable=daemon_name,
        logger_prefix=daemon_name,
        partition_wait_time=partition_wait_time,
        sleep_time=sleep_time,
        run_once_fnc=functools.partial(run_once, bulk=bulk),
        activities=None,
    )
Example #5
0
def finisher(once=False,
             sleep_time=60,
             activities=None,
             bulk=100,
             db_bulk=1000,
             partition_wait_time=10):
    """
    Run the conveyor-finisher daemon: update replicas and rules based on
    finished requests.

    :param once: If True, execute a single iteration and return.
    :param sleep_time: Seconds between two cycles.
    :param activities: Optional list of activities to restrict to; sorted in place when given.
    :param bulk: Bulk size pre-bound into run_once.
    :param db_bulk: Database bulk size pre-bound into run_once.
    :param partition_wait_time: Wait time forwarded to run_daemon.
    """
    # Load the whole [conveyor] configuration section, falling back to an
    # empty mapping when the section does not exist.
    try:
        conveyor_config = dict(items('conveyor'))
    except ConfigNotFound:
        logging.log(logging.INFO, 'No configuration found for conveyor')
        conveyor_config = {}

    # 'suspicious_pattern' is a comma-separated list of regexes; compile each
    # one. A falsy config value is passed through unchanged.
    suspicious_patterns = conveyor_config.get('suspicious_pattern', [])
    if suspicious_patterns:
        suspicious_patterns = [re.compile(raw.strip())
                               for raw in str(suspicious_patterns).split(",")]
    logging.log(
        logging.DEBUG, "Suspicious patterns: %s" %
        [compiled.pattern for compiled in suspicious_patterns])

    retry_protocol_mismatches = conveyor_config.get(
        'retry_protocol_mismatches', False)

    # The executable string doubles as the daemon identity, so it records the
    # activity restriction this instance was started with.
    executable = 'conveyor-finisher'
    logger_prefix = executable
    if activities:
        activities.sort()
        executable += '--activities ' + str(activities)

    cycle_fnc = functools.partial(
        run_once,
        bulk=bulk,
        db_bulk=db_bulk,
        suspicious_patterns=suspicious_patterns,
        retry_protocol_mismatches=retry_protocol_mismatches,
    )
    run_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable=executable,
        logger_prefix=logger_prefix,
        partition_wait_time=partition_wait_time,
        sleep_time=sleep_time,
        run_once_fnc=cycle_fnc,
        activities=activities,
    )
Example #6
0
def collection_replica_update(once=False, limit=1000, sleep_time=10):
    """
    Run the abacus-collection-replica daemon, which checks and updates the
    collection replicas.

    :param once: If True, execute a single iteration and return.
    :param limit: Limit pre-bound into each run_once call.
    :param sleep_time: Seconds between two cycles.
    """
    cycle_fnc = functools.partial(run_once, limit=limit)
    run_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable='abacus-collection-replica',
        logger_prefix='collection_replica_update',
        partition_wait_time=1,
        sleep_time=sleep_time,
        run_once_fnc=cycle_fnc,
    )
Example #7
0
def rule_injector(once=False, sleep_time=60):
    """
    Run the judge-injector daemon, which checks for asynchronous creation of
    replication rules.

    :param once: If True, execute a single iteration and return.
    :param sleep_time: Seconds between two cycles.
    """
    daemon_name = 'judge-injector'
    # Shared across cycles via the partial below: {rule_id: datetime},
    # maintained by run_once.
    paused_rules = {}

    run_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable=daemon_name,
        logger_prefix=daemon_name,
        partition_wait_time=1,
        sleep_time=sleep_time,
        run_once_fnc=functools.partial(run_once, paused_rules=paused_rules),
    )
Example #8
0
def poller(once=False,
           activities=None,
           sleep_time=60,
           fts_bulk=100,
           db_bulk=1000,
           older_than=60,
           activity_shares=None,
           partition_wait_time=10):
    """
    Main loop to check the status of a transfer primitive with a transfertool.

    :param once: If True, only runs one iteration of the main loop.
    :param activities: Optional list of activities to restrict polling to; sorted in place when given.
    :param sleep_time: Time between two cycles.
    :param fts_bulk: Transfertool-side bulk size, forwarded to run_once.
    :param db_bulk: Database-side bulk size, forwarded to run_once.
    :param older_than: Forwarded to run_once; presumably a minimum age for requests to poll — TODO confirm units.
    :param activity_shares: Optional activity shares, forwarded to run_once.
    :param partition_wait_time: Wait time forwarded to run_daemon.
    """

    # Optional overall poll timeout from the [conveyor] section.
    try:
        timeout = config_get('conveyor', 'poll_timeout')
        timeout = float(timeout)
    except NoOptionError:
        timeout = None

    multi_vo = config_get_bool('common', 'multi_vo', False, None)

    # The executable string doubles as the daemon identity, so it records the
    # non-default restrictions this instance was started with.
    logger_prefix = executable = 'conveyor-poller'
    if activities:
        activities.sort()
        executable += '--activities ' + str(activities)
    if activity_shares:
        # Fix: the previous code also called activities.sort() here, which
        # raised AttributeError whenever activity_shares was set while
        # activities was left as None. When activities is present it has
        # already been sorted above, so the call was redundant anyway.
        executable += '--activity_shares' + str(activity_shares)
    if FILTER_TRANSFERTOOL:
        executable += ' --filter-transfertool ' + FILTER_TRANSFERTOOL

    run_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable=executable,
        logger_prefix=logger_prefix,
        partition_wait_time=partition_wait_time,
        sleep_time=sleep_time,
        run_once_fnc=functools.partial(
            run_once,
            fts_bulk=fts_bulk,
            db_bulk=db_bulk,
            older_than=older_than,
            activity_shares=activity_shares,
            multi_vo=multi_vo,
            timeout=timeout,
        ),
        activities=activities,
    )
Example #9
0
def undertaker(once: bool = False, sleep_time: int = 60, chunk_size: int = 10):
    """
    Run the undertaker daemon, which selects and deletes dids.

    :param once: If True, execute a single iteration and return.
    :param sleep_time: Seconds between two cycles.
    :param chunk_size: Chunk size pre-bound into each run_once call.
    """
    daemon_name = 'undertaker'
    # Shared across cycles via the partial below: {(scope, name): datetime},
    # maintained by run_once.
    paused_dids = {}

    run_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable=daemon_name,
        logger_prefix=daemon_name,
        partition_wait_time=1,
        sleep_time=sleep_time,
        run_once_fnc=functools.partial(run_once,
                                       paused_dids=paused_dids,
                                       chunk_size=chunk_size),
    )
Example #10
0
def re_evaluator(once=False, sleep_time=30, did_limit=100):
    """
    Run the judge-evaluator daemon, which checks the re-evaluation of dids.

    :param once: If True, execute a single iteration and return.
    :param sleep_time: Seconds between two cycles.
    :param did_limit: Limit on dids per cycle, pre-bound into run_once.
    """
    # Shared across cycles via the partial below: {(scope, name): datetime},
    # maintained by run_once.
    paused_dids = {}
    cycle_fnc = functools.partial(
        run_once,
        did_limit=did_limit,
        paused_dids=paused_dids,
    )

    run_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable='judge-evaluator',
        logger_prefix='re_evaluator',
        partition_wait_time=1,
        sleep_time=sleep_time,
        run_once_fnc=cycle_fnc,
    )
Example #11
0
def throttler(once=False, sleep_time=600, partition_wait_time=10):
    """
    Run the conveyor-throttler daemon, which checks rse transfer limits.

    :param once: If True, execute a single iteration and return.
    :param sleep_time: Seconds between two cycles.
    :param partition_wait_time: Wait time forwarded to run_daemon.
    """

    logging.info('Throttler starting')

    daemon_name = 'conveyor-throttler'

    run_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable=daemon_name,
        logger_prefix=daemon_name,
        partition_wait_time=partition_wait_time,
        sleep_time=sleep_time,
        run_once_fnc=run_once,
        activities=None,
    )
Example #12
0
def transmogrifier(bulk: int = 5,
                   once: bool = False,
                   sleep_time: int = 60) -> None:
    """
    Start a Transmogrifier worker that gets a list of new DIDs for a given
    hash, identifies the subscriptions matching the DIDs and submits a
    replication rule for each DID matching a subscription.

    :param bulk: The number of requests to process.
    :param once: Run only once.
    :param sleep_time: Time between two cycles.
    """
    cycle_fnc = functools.partial(run_once, bulk=bulk)
    run_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable="transmogrifier",
        logger_prefix="transmogrifier",
        partition_wait_time=1,
        sleep_time=sleep_time,
        run_once_fnc=cycle_fnc,
    )
Example #13
0
def submitter(once=False,
              rses=None,
              partition_wait_time=10,
              bulk=100,
              group_bulk=1,
              group_policy='rule',
              source_strategy=None,
              activities=None,
              sleep_time=600,
              max_sources=4,
              archive_timeout_override=None,
              filter_transfertool=FILTER_TRANSFERTOOL,
              transfertool=TRANSFER_TOOL,
              transfertype=TRANSFER_TYPE,
              ignore_availability=False):
    """
    Main loop to submit a new transfer primitive to a transfertool.

    :param once: If True, only run one iteration of the main loop.
    :param rses: Optional list of RSE dicts (each carrying an 'id' key) to restrict submission to.
    :param partition_wait_time: Wait time forwarded to run_daemon.
    :param bulk: Per-cycle bulk size, forwarded to run_once.
    :param group_bulk: Grouping bulk size, forwarded to run_once and to both transfertool option sets.
    :param group_policy: Grouping policy handed to the FTS3 transfertool.
    :param source_strategy: Source selection strategy handed to the FTS3 transfertool.
    :param activities: Optional list of activities to restrict to; sorted in place when given.
    :param sleep_time: Time between two cycles.
    :param max_sources: NOTE(review): not used anywhere in this body — confirm whether it should be forwarded to run_once.
    :param archive_timeout_override: Archive timeout override handed to the FTS3 transfertool.
    :param filter_transfertool: Restrict to requests pre-assigned to this transfertool; also recorded in the executable string.
    :param transfertool: Comma-separated list of transfertool names to submit with.
    :param transfertype: Handed to the Globus transfertool as its 'group_policy'.
    :param ignore_availability: Forwarded to run_once; presumably allows submission despite RSE unavailability — TODO confirm.
    """

    # Optional knobs from the [conveyor] config section; each falls back to a
    # default when the option is absent.
    try:
        partition_hash_var = config_get('conveyor', 'partition_hash_var')
    except NoOptionError:
        partition_hash_var = None
    try:
        scheme = config_get('conveyor', 'scheme')
    except NoOptionError:
        scheme = None
    try:
        failover_scheme = config_get('conveyor', 'failover_scheme')
    except NoOptionError:
        failover_scheme = None
    try:
        timeout = config_get('conveyor', 'submit_timeout')
        timeout = float(timeout)
    except NoOptionError:
        timeout = None

    try:
        bring_online = config_get_int('conveyor', 'bring_online')
    except NoOptionError:
        bring_online = 43200

    # 'max_time_in_queue' is a comma-separated list of "activity:int" pairs,
    # parsed into a per-activity mapping.
    try:
        max_time_in_queue = {}
        timelife_conf = config_get('conveyor', 'max_time_in_queue')
        timelife_confs = timelife_conf.split(",")
        for conf in timelife_confs:
            act, timelife = conf.split(":")
            max_time_in_queue[act.strip()] = int(timelife.strip())
    except NoOptionError:
        max_time_in_queue = {}

    # Guarantee a catch-all entry (168 presumably means hours, i.e. one week
    # — TODO confirm units).
    if 'default' not in max_time_in_queue:
        max_time_in_queue['default'] = 168
    logging.debug("Maximum time in queue for different activities: %s",
                  max_time_in_queue)

    # The executable string doubles as the daemon identity, so it records the
    # non-default restrictions this instance was started with.
    logger_prefix = executable = "conveyor-submitter"
    if activities:
        activities.sort()
        executable += '--activities ' + str(activities)
    if filter_transfertool:
        executable += ' --filter-transfertool ' + filter_transfertool
    # Reduce the RSE dicts to their ids for run_once; None means "no filter".
    if rses:
        rse_ids = [rse['id'] for rse in rses]
    else:
        rse_ids = None

    transfertools = transfertool.split(',')
    # Per-transfertool option sets, keyed by transfertool class.
    # NOTE(review): default_lifetime 172800 presumably seconds (two days) — confirm.
    transfertool_kwargs = {
        FTS3Transfertool: {
            'group_policy': group_policy,
            'group_bulk': group_bulk,
            'source_strategy': source_strategy,
            'max_time_in_queue': max_time_in_queue,
            'bring_online': bring_online,
            'default_lifetime': 172800,
            'archive_timeout_override': archive_timeout_override,
        },
        GlobusTransferTool: {
            'group_policy': transfertype,
            'group_bulk': group_bulk,
        },
    }

    run_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable=executable,
        logger_prefix=logger_prefix,
        partition_wait_time=partition_wait_time,
        sleep_time=sleep_time,
        run_once_fnc=functools.partial(
            run_once,
            bulk=bulk,
            group_bulk=group_bulk,
            filter_transfertool=filter_transfertool,
            transfertools=transfertools,
            ignore_availability=ignore_availability,
            scheme=scheme,
            failover_scheme=failover_scheme,
            partition_hash_var=partition_hash_var,
            rse_ids=rse_ids,
            timeout=timeout,
            transfertool_kwargs=transfertool_kwargs,
        ),
        activities=activities,
    )
Example #14
0
def stager(once=False,
           rses=None,
           bulk=100,
           group_bulk=1,
           group_policy='rule',
           source_strategy=None,
           activities=None,
           sleep_time=600):
    """
    Main loop to submit a new transfer primitive to a transfertool.

    :param once: If True, only run one iteration of the main loop.
    :param rses: Optional list of RSE dicts (each carrying an 'id' key) to restrict staging to.
    :param bulk: Per-cycle bulk size, forwarded to run_once.
    :param group_bulk: Grouping bulk size, forwarded to run_once and the FTS3 options.
    :param group_policy: Grouping policy handed to the FTS3 transfertool.
    :param source_strategy: Source selection strategy handed to the FTS3 transfertool.
    :param activities: Optional list of activities to restrict to; sorted in place when given.
    :param sleep_time: Time between two cycles.
    """

    # Optional knobs from the [conveyor] config section; each falls back to a
    # default when the option is absent.
    try:
        scheme = config_get('conveyor', 'scheme')
    except NoOptionError:
        scheme = None

    try:
        failover_scheme = config_get('conveyor', 'failover_scheme')
    except NoOptionError:
        failover_scheme = None

    try:
        bring_online = config_get_int('conveyor', 'bring_online')
    except NoOptionError:
        bring_online = 43200

    # 'max_time_in_queue' is a comma-separated list of "activity:int" pairs,
    # parsed into a per-activity mapping.
    try:
        max_time_in_queue = {}
        timelife_conf = config_get('conveyor', 'max_time_in_queue')
        timelife_confs = timelife_conf.split(",")
        for conf in timelife_confs:
            act, timelife = conf.split(":")
            max_time_in_queue[act.strip()] = int(timelife.strip())
    except NoOptionError:
        max_time_in_queue = {}
    # Guarantee a catch-all entry (168 presumably means hours, i.e. one week
    # — TODO confirm units).
    if 'default' not in max_time_in_queue:
        max_time_in_queue['default'] = 168
    logging.debug("Maximum time in queue for different activities: %s" %
                  max_time_in_queue)

    # The executable string doubles as the daemon identity, so it records the
    # activity restriction this instance was started with.
    logger_prefix = executable = 'conveyor-stager'
    if activities:
        activities.sort()
        executable += '--activities ' + str(activities)

    # Reduce the RSE dicts to their ids for run_once; None means "no filter".
    if rses:
        rse_ids = [rse['id'] for rse in rses]
    else:
        rse_ids = None

    # FTS3 option set, keyed by transfertool class.
    # NOTE(review): default_lifetime is -1 here (the submitter uses 172800) — confirm intended.
    transfertool_kwargs = {
        FTS3Transfertool: {
            'group_policy': group_policy,
            'group_bulk': group_bulk,
            'source_strategy': source_strategy,
            'max_time_in_queue': max_time_in_queue,
            'bring_online': bring_online,
            'default_lifetime': -1,
        }
    }

    run_daemon(
        once=once,
        graceful_stop=graceful_stop,
        executable=executable,
        logger_prefix=logger_prefix,
        # NOTE(review): other daemons here pass an integer; None looks deliberate
        # but confirm run_daemon accepts it.
        partition_wait_time=None,
        sleep_time=sleep_time,
        run_once_fnc=functools.partial(
            run_once,
            bulk=bulk,
            group_bulk=group_bulk,
            scheme=scheme,
            failover_scheme=failover_scheme,
            rse_ids=rse_ids,
            transfertool_kwargs=transfertool_kwargs,
        ),
        activities=activities,
    )