Example #1
def get_blocks_at_pnn(pnn, pcli, multi_das_calls=True):
    """
    Get the list of completed replicas of closed blocks at a site
    :pnn:  the phedex node name
    :pcli: phedex client instance

    returns a dictionnary with <block name>: <number of files>
    """

    # This is not optimal in terms of calls and time, but it
    # keeps the dasgoclient memory footprint from exploding
    if multi_das_calls:
        blocks_at_pnn = {}
        logging.notice('Getting blocks with multiple das calls. %s',
                       list(string.ascii_letters + string.digits))

        for item in string.ascii_letters + string.digits:
            for block in pcli.list_data_items(pnn=pnn,
                                              pditem='/' + item + '*/*/*'):
                block_info = block['block'][0]
                if block_info['is_open'] == 'n' and \
                        block_info['replica'][0]['complete'] == 'y':
                    blocks_at_pnn[block_info['name']] = block_info['files']
            logging.notice('Got blocks for %s', item)
        return blocks_at_pnn
    else:
        return {
            item['block'][0]['name']: item['block'][0]['files']
            for item in pcli.list_data_items(pnn=pnn)
            if item['block'][0]['is_open'] == 'n'
            and item['block'][0]['replica'][0]['complete'] == 'y'
        }
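A minimal usage sketch, reusing the PhEDEx client constructor seen in Example #4 (the node name here is purely illustrative):

pcli = PhEDEx()

# one DAS query per leading letter/digit; pass multi_das_calls=False
# to fetch everything in a single, heavier query instead
blocks = get_blocks_at_pnn('T2_US_Nebraska', pcli)
print('%d closed, complete blocks' % len(blocks))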
Example #2
def dataset_replica_update(dataset, pnn, rse, pcli, account, dry):
    """
    Just wrapping the update method.
    """

    try:
        rcli = Client(account=account)
    except CannotAuthenticate:
        logging.warning("cannot authenticate with account %s, skipping pnn %s",
                        account, pnn)
        return None

    logging.my_fmt(label='update:rse=%s:rds=%s' % (pnn, dataset))

    logging.notice('Starting.')

    try:
        ret = _replica_update(dataset, pnn, rse, pcli, rcli, dry)

    #pylint: disable=broad-except
    except Exception as exc:
        logging.error('Exception %s raised: %s',
                      type(exc).__name__,
                      traceback.format_exc().replace('\n', '~~'))
        return None

    logging.notice('Finished %s.', ret)
    return ret
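The broad except above deliberately flattens the traceback onto a single log line by replacing newlines. A standalone sketch of the same idiom, using the stdlib logging module instead of the project's wrapper:

import logging
import traceback

try:
    1 / 0
except Exception as exc:  # pylint: disable=broad-except
    logging.error('Exception %s raised: %s',
                  type(exc).__name__,
                  traceback.format_exc().replace('\n', '~~'))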
Example #3
def get_blocks_at_pnn(pnn, pcli, multi_das_calls=True, prefix=None):
    """
    Get the list of completed replicas of closed blocks at a site
    :pnn:  the phedex node name
    :pcli: phedex client instance

    returns a dictionnary with <block name>: <number of files>
    """

    # This is not optimal in terms of calls and time but reduces the memory footprint

    blocks_at_pnn = {}
    if prefix:
        with monitor.record_timer_block('cms_sync.pnn_blocks_split'):
            logging.summary('Getting blocks at %s starting with %s' % (pnn, prefix))
            some_blocks_at_pnn = pcli.blocks_at_site(pnn=pnn, prefix=prefix)
            blocks_at_pnn.update(some_blocks_at_pnn)
            logging.summary('Got blocks at %s starting with %s' % (pnn, prefix))
    elif multi_das_calls:
        logging.summary('Getting all blocks at %s with multiple DAS calls' % pnn)
        for item in string.ascii_letters + string.digits:
            with monitor.record_timer_block('cms_sync.pnn_blocks_split'):
                logging.summary('Getting blocks at %s starting with %s' % (pnn, item))
                some_blocks_at_pnn = pcli.blocks_at_site(pnn=pnn, prefix=item)
                blocks_at_pnn.update(some_blocks_at_pnn)
                logging.summary('Got blocks at %s starting with %s' % (pnn, item))
    else:
        logging.summary('Getting all blocks at %s in one call' % pnn)
        with monitor.record_timer_block('cms_sync.pnn_blocks_all'):
            blocks_at_pnn = pcli.blocks_at_site(pnn=pnn)

    logging.summary('Got blocks at %s.' % pnn)
    return blocks_at_pnn
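Example #7 below moves this prefix loop out to the caller; the calling pattern looks roughly like this (node name illustrative):

import string

pcli = PhEDEx()
blocks = {}
# one call per leading character keeps each DAS response small
for prefix in string.ascii_letters + string.digits:
    blocks.update(get_blocks_at_pnn('T1_US_FNAL_Disk', pcli, prefix=prefix))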
Example #4
def sync(config, logs):
    """
    Main Sync process
    """

    logging.my_logfile(logs=logs)
    logging.my_fmt(label='main_sync')
    starttime = datetime.now()
    modify = {}
    workers = {}  # running workers, keyed by pnn
    pnns = None  # list of pnns still to be launched
    pool = None

    pcli = PhEDEx()

    install_mp_handler()

    conf = _load_config(config, modify, starttime)

    pnns = []

    size = conf['main']['pool']

    logging.summary('Starting')

    while conf['main']['run']:

        if pool is None:
            logging.notice('Starting pool of size %d', size)
            pool = multiprocessing.NDPool(size)

        add = [
            pnn for pnn, sec in conf.items()
            if pnn != 'main' and sec['run']
            and pnn not in workers and pnn not in pnns
        ]

        pnns += add

        random.shuffle(pnns)

        if not _ping():
            logging.warning('Cannot ping, not launching workers')
        else:
            _launch_workers(pool, workers, pnns, pcli)
            pnns = []

        _poll_workers(workers, pnns)

        conf = _load_config(config, modify, starttime)

        if (not conf['main']['run'] or
                conf['main']['pool'] != size):

            # trigger draining of all workers, close the pool and wait
            # for the task to be over
            conf = _load_config(config, {'default': {'run': False}}, starttime)
            _drain_up(workers, pnns)
            workers = {}
            pool.close()
            pool = None
            size = conf['main']['pool']

        else:
            time.sleep(conf['main']['sleep'])

    logging.summary('Exiting.')

    return config
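The loop above implies a configuration with a 'main' section plus one section per pnn. A sketch of the structure _load_config is expected to return, with keys taken from their uses above and values purely illustrative:

conf = {
    'main': {'run': True, 'pool': 4, 'sleep': 60},
    'T2_US_Nebraska': {'run': True},
    'T1_US_FNAL_Disk': {'run': False},
}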
Example #5
def pnn_sync(pnn, pcli):
    """
    Synchronize one rucio dataset at one rse
    :pnn:    phedex node name.
    :pcli:   phedex client.
    """

    summary = copy.deepcopy(DEFAULT_PNN_SUMMARY)

    conf = _get_config(pnn)
    summary['conf'] = conf

    if 'verbosity' in conf:
        logging.my_lvl(conf['verbosity'])

    rcli = Client(account=SYNC_ACCOUNT_FMT % pnn.lower())

    if _pnn_abort(pnn, summary, rcli):
        return summary

    diff = get_node_diff(pnn, pcli, rcli, conf)
    summary['timing'].update(diff['timing'])
    diff = diff['return']
    summary['diff'] = diff['summary']

    if (diff['summary']['tot'] == diff['summary']['to_remove']
            and not conf['allow_clean']):
        logging.warning('All datasets to be removed. Aborting.')
        summary['status'] = 'aborted'
        return summary

    logging.notice("Got diff=%s, timing=%s", summary['diff'],
                   summary['timing'])

    if _pnn_abort(pnn, summary, rcli):
        return summary

    workers = get_timing(_launch_pnn_workers(conf, diff, pnn, pcli, rcli),
                         summary['timing'])

    summary['workers'] = len(workers)

    logging.notice("Launched %d workers, pool size %d, timing %s",
                   summary['workers'], int(conf['pool']),
                   summary['timing']['_launch_pnn_workers'])

    left = int(conf['chunck']) - summary['workers'] + int(
        conf['min_deletions'])

    if left > 0:
        workers_st = get_timing(
            _launch_pnn_workers_st(left, diff, pnn, pcli, rcli),
            summary['timing'])

        summary['workers_st'] = len(workers_st)

        logging.notice("Launched %d single thread workers, timing %s",
                       summary['workers_st'],
                       summary['timing']['_launch_pnn_workers_st'])

        workers = dict(workers, **workers_st)

    _get_pnn_workers(workers, summary)

    summary['status'] = 'finished'

    return summary
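DEFAULT_PNN_SUMMARY is defined elsewhere in the module and deep-copied above; a sketch of the fields this function reads and writes, inferred from the code (the real template may carry more keys):

DEFAULT_PNN_SUMMARY = {
    'conf': None,
    'diff': None,
    'timing': {},
    'workers': 0,
    'workers_st': 0,
    'status': 'running',  # set to 'aborted' or 'finished' above
}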
Example #6
    def update_replicas(self, dry=False):
        """
        Add or removes replicas for the dataset at rse.
        :dry:  Drydrun. default false
        """

        logging.notice('Updating replicas for %s:%s at %s' %
                       (self.scope, self.dataset, self.rse))

        replicas = self.rcli.list_replicas(
            [{'scope': self.scope, 'name': self.dataset}],
            rse_expression='rse=%s' % self.rse)

        rrepl = [repl['name'] for repl in replicas]

        prepl = list(self.replicas)

        missing = list(set(prepl) - set(rrepl))

        to_remove = list(set(rrepl) - set(prepl))

        if missing and dry:
            logging.dry('Adding replicas %s to rse %s.', str(missing),
                        self.rse)

        elif missing:
            logging.verbose('Adding replicas %s to rse %s.', str(missing),
                            self.rse)

            self.rcli.add_replicas(
                rse=self.rse,
                files=[{
                    'scope': self.scope,
                    'name': self.replicas[lfn]['name'],
                    'adler32': self.replicas[lfn]['checksum'],
                    'bytes': self.replicas[lfn]['size'],
                } for lfn in missing])

            # missing files that are not in the list of dataset files
            # are to be attached.
            lfns = [
                item['name']
                for item in self.rcli.list_files(scope=self.scope,
                                                 name=self.dataset)
            ]

            missing_lfns = list(set(missing) - set(lfns))
            if missing_lfns:
                logging.verbose('Attaching lfns %s to dataset %s.',
                                str(missing_lfns), self.dataset)

                try:
                    self.rcli.attach_dids(
                        scope=self.scope,
                        name=self.dataset,
                        dids=[{
                            'scope': self.scope,
                            'name': lfn
                        } for lfn in missing_lfns])

                except FileAlreadyExists:
                    logging.warning('Trying to attach already existing files.')

        if to_remove and dry:
            logging.dry('Removing replicas %s from rse %s.', str(to_remove),
                        self.rse)

        elif to_remove:
            logging.verbose('Removing replicas %s from rse %s.',
                            str(to_remove), self.rse)
            for to_remove_chunk in chunks(to_remove, REMOVE_CHUNK_SIZE):
                attempt = 0
                while True:
                    attempt += 1
                    try:
                        self.rcli.delete_replicas(
                            rse=self.rse,
                            files=[{
                                'scope': self.scope,
                                'name': lfn,
                            } for lfn in to_remove_chunk])
                        break
                    except DatabaseException:
                        # give up after three retries
                        if attempt > 3:
                            raise
                        logging.warning(
                            'DatabaseException raised, retrying...')
                        time.sleep(randint(1, 5))

        return {'added': missing, 'removed': to_remove}
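The chunks helper that batches the deletions is defined elsewhere; a typical generator consistent with its use here, as a sketch:

def chunks(seq, size):
    # yield successive slices of seq, each at most size items long
    for i in range(0, len(seq), size):
        yield seq[i:i + size]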
Example #7
def pnn_sync(pnn, pcli):
    """
    Synchronize one rucio dataset at one rse
    :pnn:    phedex node name.
    :pcli:   phedex client.
    """
    monitor.record_counter('cms_sync.site_started')
    summary = copy.deepcopy(DEFAULT_PNN_SUMMARY)

    conf = _get_config(pnn)
    summary['conf'] = conf

    if 'verbosity' in conf:
        logging.my_lvl(conf['verbosity'])

    rcli = Client(account=SYNC_ACCOUNT_FMT % pnn.lower())

    if _pnn_abort(pnn, summary, rcli):
        return summary

    # When conf['multi_das_calls'] is set, loop over name prefixes
    # (one DAS query each) so responses stay small.
    if conf['multi_das_calls']:
        prefixes = list(string.ascii_letters + string.digits)
        random.shuffle(prefixes)
    else:
        prefixes = [None]

    workers = {}  # accumulate worker handles across prefixes
    for prefix in prefixes:
        diff = get_node_diff(pnn, pcli, rcli, conf, prefix=prefix)
        summary['timing'].update(diff['timing'])
        diff = diff['return']
        summary['diff'] = diff['summary']

        if (diff['summary']['tot'] == diff['summary']['to_remove']
                and not conf['allow_clean']):
            logging.warning('All datasets to be removed. Aborting.')
            summary['status'] = 'aborted'
            continue

        logging.notice("Got diff=%s, timing=%s", summary['diff'],
                       summary['timing'])

        if _pnn_abort(pnn, summary, rcli):
            return summary

        workers.update(
            get_timing(_launch_pnn_workers(conf, diff, pnn, pcli, rcli),
                       summary['timing']))

    summary['workers'] = len(workers)

    logging.notice("Launched %d workers, pool size %d, timing %s",
                   summary['workers'], int(conf['pool']),
                   summary['timing']['_launch_pnn_workers'])

    left = int(conf['chunck']) - summary['workers'] + int(
        conf['min_deletions'])

    if left > 0:
        workers_st = get_timing(
            _launch_pnn_workers_st(left, diff, pnn, pcli, rcli),
            summary['timing'])

        summary['workers_st'] = len(workers_st)

        logging.notice("Launched %d single thread workers, timing %s",
                       summary['workers_st'],
                       summary['timing']['_launch_pnn_workers_st'])

        workers = dict(workers, **workers_st)

    _get_pnn_workers(workers, summary)
    monitor.record_counter('cms_sync.site_completed')

    summary['status'] = 'finished'

    return summary
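Tying the examples together: each pool worker launched by the sync loop of Example #4 ultimately runs this function. A direct invocation looks roughly like this (node name illustrative):

pcli = PhEDEx()
summary = pnn_sync('T2_US_Nebraska', pcli)
print(summary['status'], summary['workers'])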