Example #1
def _do_stageout(job, xdata, activity, title):
    """
    Use the `StageOutClient` in the Data API to perform stage-out.

    :param job: job object.
    :param xdata: list of FileSpec objects.
    :param activity: copytool activity or preferred list of activities to resolve copytools.
    :param title: type of stage-out (output, log) (string).
    :return: True in case of successful transfers (Boolean).
    """

    log = get_logger(job.jobid)
    log.info('prepare to stage-out %d %s file(s)' % (len(xdata), title))

    event_type = "put_sm"
    #if log_transfer:
    #    eventType += '_logs'
    #if special_log_transfer:
    #    eventType += '_logs_os'
    if job.is_analysis():
        event_type += "_a"
    rse = get_rse(xdata)
    localsite = remotesite = rse
    trace_report = TraceReport(pq=os.environ.get('PILOT_SITENAME', ''), localSite=localsite, remoteSite=remotesite, dataset="", eventType=event_type)
    trace_report.init(job)

    try:
        client = StageOutClient(job.infosys, logger=log, trace_report=trace_report)
        kwargs = dict(workdir=job.workdir, cwd=job.workdir, usecontainer=False, job=job)  #, mode='stage-out')
        # prod analy unification: use destination preferences from PanDA server for unified queues
        if job.infosys.queuedata.type != 'unified':
            client.prepare_destinations(xdata, activity)  ## FIX ME LATER: split activities: for astorages and for copytools (to unify with ES workflow)
        client.transfer(xdata, activity, **kwargs)
    except PilotException as error:
        import traceback
        error_msg = traceback.format_exc()
        log.error(error_msg)
        msg = errors.format_diagnostics(error.get_error_code(), error_msg)
        job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(error.get_error_code(), msg=msg)
    except Exception:
        import traceback
        log.error(traceback.format_exc())
        # do not raise the exception since that will prevent also the log from being staged out
        # error = PilotException("stageOut failed with error=%s" % e, code=ErrorCodes.STAGEOUTFAILED)
    else:
        log.debug('stage-out client completed')

    log.info('summary of transferred files:')
    for e in xdata:
        if not e.status:
            status = "(not transferred)"
        else:
            status = e.status
        log.info(" -- lfn=%s, status_code=%s, status=%s" % (e.lfn, e.status_code, status))

    remain_files = [e for e in xdata if e.status not in ['transferred']]
    log.debug('remain_files=%s' % str(remain_files))
    log.debug('xdata=%s' % str(xdata))

    return not remain_files
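
The snippet below is a hypothetical call-site sketch, not part of this example: it assumes a pilot job object exposing outdata and logdata lists of FileSpec objects, and the activity lists are placeholders for whatever the surrounding module actually resolves for output and log transfers.

# Hedged usage sketch: stage out the job's output files first, then its log.
# job.outdata, job.logdata and the activity lists are assumptions here.
def stage_out_all(job):
    output_ok = _do_stageout(job, job.outdata, ['pw', 'w'], title='output')
    log_ok = _do_stageout(job, job.logdata, ['pl', 'pw', 'w'], title='log')
    # _do_stageout() returns True only when no file is left untransferred
    return output_ok and log_ok
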
Example #2
def _do_stageout(job, xdata, activity, queue, title, output_dir=''):
    """
    Use the `StageOutClient` in the Data API to perform stage-out.

    :param job: job object.
    :param xdata: list of FileSpec objects.
    :param activity: copytool activity or preferred list of activities to resolve copytools.
    :param queue: PanDA queue (string).
    :param title: type of stage-out (output, log) (string).
    :param output_dir: optional output directory (string).
    :return: True in case of successful transfers (Boolean).
    """

    logger.info('prepare to stage-out %d %s file(s)', len(xdata), title)
    label = 'stage-out'

    # should stage-out be done by a script (for containerisation) or by invoking the API (ie classic mode)?
    use_container = pilot.util.middleware.use_middleware_script(
        job.infosys.queuedata.container_type.get("middleware"))
    if use_container:
        logger.info('stage-out will be done by a script')
        try:
            eventtype, localsite, remotesite = get_trace_report_variables(
                job, label=label)
            pilot.util.middleware.containerise_middleware(
                job,
                xdata,
                queue,
                eventtype,
                localsite,
                remotesite,
                job.infosys.queuedata.container_options,
                output_dir,
                label=label,
                container_type=job.infosys.queuedata.container_type.get(
                    "middleware"))
        except PilotException as error:
            logger.warning(
                'stage-out containerisation threw a pilot exception: %s',
                error)
        except Exception as error:
            logger.warning('stage-out containerisation threw an exception: %s',
                           error)
    else:
        try:
            logger.info('stage-out will not be done in a container')

            # create the trace report
            trace_report = create_trace_report(job, label=label)

            client = StageOutClient(job.infosys,
                                    logger=logger,
                                    trace_report=trace_report)
            kwargs = dict(
                workdir=job.workdir,
                cwd=job.workdir,
                usecontainer=False,
                job=job,
                output_dir=output_dir,
                catchall=job.infosys.queuedata.catchall)  #, mode='stage-out')
            # prod analy unification: use destination preferences from PanDA server for unified queues
            if job.infosys.queuedata.type != 'unified':
                client.prepare_destinations(
                    xdata, activity
                )  ## FIX ME LATER: split activities: for astorages and for copytools (to unify with ES workflow)
            client.transfer(xdata, activity, **kwargs)
        except PilotException as error:
            import traceback
            error_msg = traceback.format_exc()
            logger.error(error_msg)
            msg = errors.format_diagnostics(error.get_error_code(), error_msg)
            job.piloterrorcodes, job.piloterrordiags = errors.add_error_code(
                error.get_error_code(), msg=msg)
        except Exception:
            import traceback
            logger.error(traceback.format_exc())
            # do not raise the exception since that will prevent also the log from being staged out
            # error = PilotException("stageOut failed with error=%s" % e, code=ErrorCodes.STAGEOUTFAILED)
        else:
            logger.debug('stage-out client completed')

    logger.info('summary of transferred files:')
    for iofile in xdata:
        if not iofile.status:
            status = "(not transferred)"
        else:
            status = iofile.status
        logger.info(" -- lfn=%s, status_code=%s, status=%s", iofile.lfn,
                    iofile.status_code, status)

    remain_files = [
        iofile for iofile in xdata if iofile.status not in ['transferred']
    ]

    return not remain_files
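
Whether the containerised path is taken depends on the queue's container_type setting for middleware. The following is a minimal stand-alone sketch, not the actual pilot.util.middleware implementation, of the kind of check use_middleware_script() performs; the dictionary values are illustrative only.

# Hedged sketch only: approximates the decision, not the real pilot code.
def use_middleware_script_sketch(container_type_middleware):
    # queuedata.container_type is assumed to look something like
    # {'pilot': 'singularity', 'middleware': 'container'} (illustrative values)
    return container_type_middleware == 'container'

container_type = {'pilot': 'singularity', 'middleware': 'container'}  # example values
print(use_middleware_script_sketch(container_type.get('middleware')))  # True -> scripted stage-out
print(use_middleware_script_sketch(''))  # False -> classic API mode
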
Example #3
        files = [{
            'scope': scope,
            'lfn': lfn,
            'workdir': args.workdir,
            'dataset': dataset,
            'ddmendpoint': ddmendpoint,
            'ddmendpoint_alt': None
        }]
        # do not abbreviate the following two lines as otherwise the content of xfiles will be a list of generator objects
        _xfiles = [FileSpec(type='output', **f) for f in files]
        xfiles += _xfiles

        # prod analy unification: use destination preferences from PanDA server for unified queues
        if infoservice.queuedata.type != 'unified':
            client.prepare_destinations(
                xfiles, activity
            )  ## FIX ME LATER: split activities: for astorages and for copytools (to unify with ES workflow)

    try:
        r = client.transfer(xfiles, activity=activity, **kwargs)
    except PilotException as error:
        import traceback
        error_msg = traceback.format_exc()
        logger.error(error_msg)
        err = errors.format_diagnostics(error.get_error_code(), error_msg)
    except Exception as error:
        err = str(error)
        errcode = -1
        message(err)

#    for lfn, scope, dataset, ddmendpoint, guid in list(zip(lfns, scopes, datasets, ddmendpoints, guids)):
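
The commented-out zip() line above hints at the loop this fragment sits in: parallel lists of lfns, scopes, datasets and ddmendpoints are walked together to build one file dictionary per output file. A self-contained sketch with illustrative stand-in values:

# All values below are illustrative placeholders, not taken from the example.
lfns = ['out_1.root', 'out_2.root']
scopes = ['user.somebody', 'user.somebody']
datasets = ['user.somebody.dataset1', 'user.somebody.dataset1']
ddmendpoints = ['SOME_RSE_SCRATCHDISK', 'SOME_RSE_SCRATCHDISK']
workdir = '/path/to/workdir'

file_dicts = []
for lfn, scope, dataset, ddmendpoint in zip(lfns, scopes, datasets, ddmendpoints):
    file_dicts.append({
        'scope': scope,
        'lfn': lfn,
        'workdir': workdir,
        'dataset': dataset,
        'ddmendpoint': ddmendpoint,
        'ddmendpoint_alt': None,
    })

# file_dicts would then be turned into FileSpec(type='output', **f) objects
# and handed to client.transfer(), as in the fragment above.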