Example #1
def search_for_new(config, config_file, post_config_file, process_msg,
                   store_msg, qseq, fastq):
    """Search for any new unreported directories.
    """
    reported = _read_reported(config["msg_db"])
    for dname in _get_directories(config):
        if os.path.isdir(dname) and dname not in reported:
            if _is_finished_dumping(dname):
                # Inject run_name into logging calls; convenient for putting
                # run_name in the "Subject" of email notifications.
                with logbook.Processor(lambda record: record.extra.__setitem__(
                        'run', os.path.basename(dname))):
                    logger2.info(
                        "The instrument has finished dumping on directory %s" %
                        dname)
                    _update_reported(config["msg_db"], dname)
                    _process_samplesheets(dname, config)
                    if qseq:
                        logger2.info("Generating qseq files for %s" % dname)
                        _generate_qseq(get_qseq_dir(dname), config)
                    fastq_dir = None
                    if fastq:
                        logger2.info("Generating fastq files for %s" % dname)
                        fastq_dir = _generate_fastq(dname, config)
                    _post_process_run(dname, config, config_file, fastq_dir,
                                      post_config_file, process_msg, store_msg)
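The pattern above recurs throughout these examples: a logbook.Processor pushed as a context manager injects a value into record.extra, and a handler's format string can then reference it. A minimal self-contained sketch of just that mechanism (the handler, logger name, and run name here are illustrative assumptions, not part of the original pipeline):

import logbook

logger = logbook.Logger('demo')
handler = logbook.StderrHandler(
    format_string='[{record.extra[run]}] {record.level_name}: {record.message}')

def inject_run(record):
    # Hypothetical run name; the example above derives it from the directory.
    record.extra['run'] = '110221_FC12345AAXX'

with handler.applicationbound():
    with logbook.Processor(inject_run):
        logger.info('The instrument has finished dumping')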
Example #2
    def __call__(self):
        # Find an open port for the logs
        # (that's a race condition, deal with it)
        tmpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        tmpsock.bind(('localhost', 0))
        log_uri = 'tcp://{}:{}'.format(*tmpsock.getsockname())
        tmpsock.close()

        setup = logbook.NestedSetup([
            logbook.NullHandler(),
            logbook.StderrHandler(level=logbook.INFO),
            logbook.Processor(self._process_record),
        ])
        self.subscriber = ZeroMQSubscriber(log_uri, multi=True)
        self.subscriber.dispatch_in_background(setup=setup)

        self.process = sh.python(
            '-m',
            'onitu',
            '--entries',
            self.entries,
            '--log-uri',
            log_uri,
            _bg=self.bg,
        )

        return self.process
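For context, the spawned process presumably uses the --log-uri it receives to publish its records over ZeroMQ, which is what the parent's ZeroMQSubscriber picks up. A sketch of that publishing side using logbook's ZeroMQHandler (the channel name is an assumption; multi=True matches the subscriber above):

import logbook
from logbook.queues import ZeroMQHandler

def child_logging(log_uri):
    # Every record emitted in this block is pushed to the parent's subscriber.
    with ZeroMQHandler(log_uri, multi=True).applicationbound():
        logbook.Logger('onitu').info('hello from the child process')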
Example #3
def request_id_tag_context(title=None, tag=None, logger=None):
    """
    Context that adds a request ID tag logging processor.

    :param title: title to write in the log when setting a new tag (if a previous tag was not set).
                  If ``None``, no log message will be generated.
    :param tag: tag to set for the duration of the context. If ``None`` and no existing tag is set,
                a new random tag will be created.
    :param logger: logger to use; if ``None``, the default logger is used.
    """
    if logger is None:
        logger = get_logger()

    prev_tag, new_tag = get_tag(), tag
    if new_tag is not None:
        set_tag(new_tag)
    elif prev_tag is None:
        new_tag = new_random_tag()
        set_tag(new_tag)

    # We create a logbook.Processor context only if we didn't have a previous tag, otherwise there must already
    # be a context in place somewhere down the call stack.
    with (logbook.Processor(inject_request_id_tag).greenletbound()
          if prev_tag is None else _null_context()):
        if prev_tag is None and title is not None:
            # Log this function since it's our first "tagged" entry to the greenlet
            logger.debug("setting new tag {} on greenlet {}".format(
                new_tag, title))
        try:
            yield
        finally:
            set_tag(prev_tag)
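_null_context is a private helper not shown here; presumably it is a no-op context manager used when a processor is already in place. A minimal stand-in (on Python 3.7+, contextlib.nullcontext does the same job):

import contextlib

@contextlib.contextmanager
def _null_context():
    # Do nothing; lets the `with` statement above always have a target.
    yield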
Example #4
def create_logger(app):
    if app.config.get('ENVIRONMENT') == 'production':
        server_addr = ('localhost', 25)
    else:
        server_addr = ('localhost', 2525)

    mail_handler = logbook.MailHandler(
        '*****@*****.**',
        ['*****@*****.**'],
        server_addr=server_addr,
        level='DEBUG',
        format_string=u'''Subject: ERROR at gitorama.com

[{record.time:%Y-%m-%d %H:%M}] {record.extra[request_id]}: {record.level_name}: {record.channel}: {record.message}''',
        related_format_string=
        u'[{record.time:%Y-%m-%d %H:%M}] {record.extra[request_id]}: {record.level_name}: {record.channel}: {record.message}',
    )
    file_handler = logbook.FileHandler(
        app.config['LOG_FILE'],
        level='DEBUG',
        format_string=
        u'[{record.time:%Y-%m-%d %H:%M}] {record.extra[request_id]}: {record.level_name}: {record.channel}: {record.message}'
    )

    def inject_id(record):
        record.extra['request_id'] = getattr(_request_ctx_stack.top,
                                             'logbook_request_id', None)

    logger = logbook.NestedSetup([
        logbook.NullHandler(),
        logbook.FingersCrossedHandler(mail_handler, reset=True),
        logbook.FingersCrossedHandler(file_handler, reset=True, bubble=True),
        logbook.Processor(inject_id),
    ])
    return logger
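A sketch of how the returned NestedSetup might be wired into a Flask application; Flask, the config values, and the push call are assumptions layered on top of the example:

from flask import Flask

app = Flask(__name__)
app.config['LOG_FILE'] = '/tmp/app.log'  # assumed value for the key used above
setup = create_logger(app)

# Push the whole setup (null handler, fingers-crossed handlers, processor)
# for the lifetime of the application; pop_application() undoes it.
setup.push_application()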
Example #5
def create_processor(plugin_predicate=_true):
    """
    Returns a new logbook Processor object that injects to each record's `extra` dict whatever the enabled plugins
    choose to put there.
    :param plugin_predicate: predicate that chooses which plugins to enable
    :returns: logbook.Processor object
    """
    return logbook.Processor(create_inject_extra_data(plugin_predicate))
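A usage sketch for create_processor; the predicate and the plugin attribute it inspects are hypothetical:

import logbook

processor = create_processor(lambda plugin: plugin.name == 'metrics')
with processor.applicationbound():
    # Records emitted here carry whatever extras the enabled plugins inject.
    logbook.Logger('app').info('annotated record')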
Example #6
    def process_request(self, request):
        def inject_info(record):
            record.extra.update(
                ip=request.META.get('REMOTE_ADDR'),
                method=request.method,
                absolute_uri=request.build_absolute_uri(),
            )

        self._processor = logbook.Processor(inject_info)
        self._processor.push_application()
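Note that push_application() has no matching pop in this snippet, so the processor would outlive the request. A hypothetical companion method that restores the stack when the response goes out:

    def process_response(self, request, response):
        # Assumed counterpart: pop the processor pushed in process_request.
        if getattr(self, '_processor', None) is not None:
            self._processor.pop_application()
            self._processor = None
        return response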
Example #7
        def handle_request(request):
            def inject_extra(record):
                record.extra['ip'] = request.remote_addr
                record.extra['method'] = request.method
                record.extra['path'] = request.path

            with logbook.Processor(inject_extra):
                with handler:
                    try:
                        1 / 0
                    except Exception:
                        self.log.exception('Exception happened during request')
Example #8
def inject(**params):
    """
    A Logbook processor to inject arbitrary information into log records.

    Simply pass in keyword arguments and use as a context manager:

        >>> with inject(identifier=str(uuid.uuid4())).applicationbound():
        ...     logger.debug('Something happened')
    """
    def callback(log_record):
        log_record.extra.update(params)

    return logbook.Processor(callback)
Example #9
def main(config_file, fc_dir, run_info_yaml=None):
    config = load_config(config_file)
    work_dir = os.getcwd()
    if config.get("log_dir", None) is None:
        config["log_dir"] = os.path.join(work_dir, "log")

    def insert_command(record):
        record.extra["command"] = sys.argv
        record.extra["version"] = version.get_pipeline_version()

    setup_logging(config)
    handler = create_log_handler(config)
    with handler, logbook.Processor(insert_command):
        run_main(config, config_file, fc_dir, work_dir, run_info_yaml)
Example #10
    def handle_request(request):
        def inject_extra(record):
            record.extra['ip'] = request.remote_addr
            record.extra['method'] = request.method
            record.extra['path'] = request.path

        processor = logbook.Processor(inject_extra)
        with activation_strategy(processor):
            handler.push_thread()
            try:
                try:
                    1 / 0
                except Exception:
                    logger.exception('Exception happened during request')
            finally:
                handler.pop_thread()
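activation_strategy is not defined in this snippet; in logbook's own test suite it is a fixture that pushes a stacked object in one of several ways. A minimal stand-in under that assumption:

import contextlib

@contextlib.contextmanager
def activation_strategy(stacked):
    # Push the processor (or handler) on the current thread; pop on exit.
    stacked.push_thread()
    try:
        yield stacked
    finally:
        stacked.pop_thread()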
Example #11
def search_for_new(*args, **kwargs):
    """Search for any new unreported directories.
    """
    config = args[0]
    reported = _read_reported(config["msg_db"])
    process_all_option = config["algorithm"].get("process_all", False)
    for dname in _get_directories(config):
        # Only process a directory if it isn't listed in the transfer db or if it was specifically requested
        # on the command line
        if os.path.isdir(dname) and \
           ((kwargs.get("run_id", None) is None and
             not any(d.startswith(dname) for d in reported)) or
            kwargs.get("run_id", None) == os.path.basename(dname)):

            # Inject run_name into logging calls; convenient for putting
            # run_name in the "Subject" of email notifications.
            def run_setter(record):
                record.extra['run'] = os.path.basename(dname)

            with logbook.Processor(run_setter):
                if kwargs.get("post_process_only",False):
                    loc_args = (dname, ) + args + (None, )
                    _post_process_run(*loc_args, **kwargs)
                    continue
                if _do_initial_processing(dname):
                    initial_processing(dname, *args, **kwargs)

                if not process_all_option:
                    if _do_first_read_processing(dname):
                        process_first_read(dname, *args, **kwargs)

                    elif _do_second_read_processing(dname):
                        process_second_read(dname, *args, **kwargs)
                else:
                    # Process both reads at once on the same machine
                    if _is_finished_dumping_checkpoint(dname):
                        process_all(dname, *args, **kwargs)


                # Re-read the reported database to make sure it hasn't
                # changed while processing.
                reported = _read_reported(config["msg_db"])
Example #12
    def _log_messages(self, log_handler, subject="Test email"):
        try:
            with log_handler.applicationbound():
                with logbook.Processor(lambda record: record.extra.__setitem__(
                        'run', subject)):
                    logger2.debug("DEBUG record test generated @ %s" %
                                  time.strftime("%x - %X"))
                    logger2.info("INFO record test generated @ %s" %
                                 time.strftime("%x - %X"))
                    logger2.notice("NOTICE record test generated @ %s" %
                                   time.strftime("%x - %X"))
                    logger2.warning("WARNING record test generated @ %s" %
                                    time.strftime("%x - %X"))
                    logger2.error("ERROR record test generated @ %s" %
                                  time.strftime("%x - %X"))
                    logger2.critical("CRITICAL record test generated @ %s" %
                                     time.strftime("%x - %X"))
        except Exception as e:
            return e
        return None
Example #13
        def wrapper(*args, **kwargs):
            start_time = time.time()
            request_id = self.generate_id()

            def inject_request_id(record):
                record.extra['request_id'] = request_id

            with logbook.Processor(inject_request_id):
                logbook.notice(self.request_str(), extra={"api": True})

                try:
                    response = callback(*args, **kwargs)
                except OperationalError as e:
                    logbook.warning("Database is down {}: {}",
                                    conf.database.uri,
                                    e,
                                    exc_info=True)
                    logbook.error("Database is down {}: {}", conf.database.uri,
                                  e)
                    response = errors.DatabaseIsDown()
                except errors.BadRequest as e:
                    e.format_response()
                    response = e
                except bottle.HTTPResponse as e:
                    response = e
                except Exception as e:
                    if self.under_test:
                        import traceback
                        traceback.print_exc()
                    logbook.exception(
                        "Exception during processing request: %s %s" %
                        (bottle.request.method, bottle.request.path))
                    self.log_response(str(e), 500, time.time() - start_time)
                    raise
                finally:
                    from model import db
                    db.session.remove()
                response = self.response_process(response,
                                                 time.time() - start_time)

            return response
Example #14
        def inner(*args, **kwargs):
            try:
                if new_session and not conf.test:
                    db.session.close()
                logbook.debug("Start task {} with args: {} {}", fn.__name__,
                              args, kwargs)

                try:
                    h = "%8x" % abs(hash(args))
                except TypeError:
                    from pprint import pformat
                    h = hash(pformat(args))
                    h = "%8x" % abs(h)
                request_id = "%s-%s" % (fn.__name__, h[0:4])

                def inject_request_id(record):
                    record.extra['request_id'] = request_id

                with logbook.Processor(inject_request_id):
                    res = fn(*args, **kwargs)
                if auto_commit:
                    db.session.commit()
                logbook.debug("Result of task {}: {}", fn.__name__, res)
                return res
            except OperationalError as operation_error:
                logbook.warning("Database is down {}: {}",
                                conf.database.uri,
                                operation_error,
                                exc_info=True)
                logbook.error("Database is down {}: {}", conf.database.uri,
                              operation_error)
                db.session.close()
                current_task.retry(exc=operation_error,
                                   countdown=calc_exp_countdown())
            except Exception as exc:
                logbook.warning("{} failed. Retrying...",
                                fn.__name__,
                                exc_info=True)
                current_task.retry(exc=exc, countdown=calc_exp_countdown())
Example #15
    def dummy(self):
        with logbook.Processor(self.tag_progress):
            logger2.info("HACK")
Example #16
    def progress(self, action):
        self.step += 1
        self._timestamp_file(action)
        with logbook.Processor(self.tag_progress):
            logger2.info(action)
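self.tag_progress is not shown in Examples #15 and #16; presumably it is a processor callback that stamps each record with the current progress state. A hypothetical sketch (the extra key is a guess):

    def tag_progress(self, record):
        # Assumed: annotate each record with the step counter kept on self.
        record.extra['step'] = self.step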
Example #17
    def process_request(self, request):
        """the main request processing. Gets a :class:`~starflyer.Request` as input and
        returns an instance of :class:`~starflyer.Response`. In case of an exception
        this will be handled as well
        """
        handler = None

        # check if we are the first request ever for this application
        self.check_first_request(request)

        # helper for injecting data into the log record
        def inject(record):
            """the injection callback for any log record"""
            record.extra['url'] = request.url
            record.extra['method'] = request.method
            record.extra['ip'] = request.remote_addr
            record.hid = id(request)
            # TODO: add a hook for adding more information

        # use the log context to actually call the handler
        with self.setup_logger():
            with logbook.Processor(inject):
                try:
                    # find the handler
                    handler = self.find_handler(request)

                    # run the before_handler hooks from app and modules
                    if handler.use_hooks:
                        for module in self.modules:
                            rv = module.before_handler(handler)
                            if rv is not None:
                                return rv
                        rv = self.before_handler(handler)
                        if rv is not None:
                            return rv

                    # in case we are in testing mode remember the last used handler
                    if self.config.testing:
                        self.last_handler = handler

                    # call the handler and receive the response
                    response = handler(**request.view_args)
                    if handler.use_hooks:
                        for module in self.modules:
                            rv = module.after_handler(
                                handler, response
                            )  # hook for post-processing a response
                            if rv is not None:
                                return rv
                        rv = self.after_handler(
                            handler,
                            response)  # hook for post-processing a response
                        if rv is not None:
                            return rv

                except Exception as e:
                    response = self.handle_user_exception(request, e)

                # now save the session, since the after-handlers might have changed it
                if handler and not self.session_interface.is_null_session(
                        handler.session):
                    self.save_session(handler.session, response)
Example #18
def report_to_statusdb(fc_name, fc_date, run_info_yaml, dirs, config):
    """
    Create statusdb report on a couchdb server.

    A FlowcellQCMetrics object holds information about a flowcell. QC
    results are stored at the flowcell level and sample level
    depending on analysis. Lane level QC data are stored in the
    FlowcellQCMetrics object.
    """
    success = True
    try:
        statusdb_config = config.get("statusdb", None)
        if statusdb_config is None:
            log.info(
                "Could not find statusdb section in configuration. No statusdb reporting will be done"
            )
            return False

        statusdb_url = statusdb_config.get("url", None)
        if statusdb_url is None:
            log.warn("No url field found in statusdb configuration section.")
            return False

        # Add email notification
        email = statusdb_config.get("statusdb_email_notification", None)
        smtp_host = config.get("smtp_host", "")
        smtp_port = config.get("smtp_port", "")
        log_handler = create_log_handler(
            {
                'email': email,
                'smtp_host': smtp_host,
                'smtp_port': smtp_port
            }, True)

        with log_handler.applicationbound():
            with logbook.Processor(lambda record: record.extra.__setitem__(
                    'run', "%s_%s" % (fc_date, fc_name))):
                log.info(
                    "Started creating QC Metrics report on statusdb for %s_%s on %s"
                    % (fc_date, fc_name, datetime.now().isoformat()))

                # Create object and parse all available metrics; no checking
                # is currently done for missing files
                try:
                    qc_obj = FlowcellQCMetrics(fc_date, fc_name, run_info_yaml,
                                               dirs.get("work", None),
                                               dirs.get("flowcell", None))
                except:
                    qc_obj = None
                # FIXME: error checking!
                if qc_obj is not None:
                    try:
                        # Save data at a sample level
                        log.info("Connecting to server at %s" % statusdb_url)
                        try:
                            couch = couchdb.Server(url="http://%s" %
                                                   statusdb_url)
                        except:
                            log.warn("Connecting to server at %s failed" %
                                     statusdb_url)
                        else:
                            log.info("Connecting to server at %s succeeded" %
                                     statusdb_url)
                        db = couch['qc']
                        # Save samples
                        for s in qc_obj.sample.keys():
                            obj = qc_obj.sample[s]
                            log.info("Saving sample %s" % obj.name())
                            _save_obj(db, obj, statusdb_url)
                        # Save flowcell object
                        _save_obj(db, qc_obj, statusdb_url)
                    except Exception as e:
                        success = False
                else:
                    log.warn(
                        "Couldn't populate FlowcellQCMetrics object. No QC data written to statusdb for %s_%s"
                        % (fc_date, fc_name))
                    success = False
            if success:
                log.info("QC Metrics report successfully written to statusdb for %s_%s on %s" \
                         % (fc_date, fc_name, datetime.now().isoformat()))
            else:
                log.warn("Encountered exception when writing to statusdb for %s_%s on %s" \
                         % (fc_date, fc_name, datetime.now().isoformat()))

    except Exception as e:
        success = False
        log.warn(
            "Encountered exception when writing QC metrics to statusdb: %s" %
            e)

    return success
Example #19
def main(input_path, transferred_db, run_folder, uppnexid, samplesheet, logfile, email_notification, config_file, force, dryrun):
    
    config = {}
    if config_file is not None:
        config = load_config(config_file)
    
    if logfile is None:
        logfile = config.get("logfile", os.path.normpath(os.path.expanduser(DEFAULT_LOGFILE)))
    
    email_handler = None
    # Don't write dry runs to log
    if dryrun:
        handler = logbook.StreamHandler(sys.stdout)
    else:
        if not os.path.exists(logfile):
            safe_makedir(os.path.dirname(logfile))
            open(logfile, "w").close()
        handler = logbook.FileHandler(logfile)

        if email_notification is None:
            email_notification = config.get("email_recipient", DEFAULT_RECIPIENT)
        recipients = email_notification.split(",")
        if len(recipients) > 0:
            email_handler = logbook.MailHandler("*****@*****.**", recipients,
                                                server_addr=[config.get("smtp_host", DEFAULT_SMTP_HOST),
                                                             config.get("smtp_port", DEFAULT_SMTP_PORT)],
                                                format_string=u'''Subject: [MiSeq delivery] {record.extra[run]}\n\n {record.message}''')
                
    with handler.applicationbound():
        
        if dryrun:
            logger2.info("This is just a dry-run. Nothing will be delivered and no directories will be created/changed")
            
        # If no run folder was specified, try with the folders in the input_path
        if run_folder is None:
            pat = "*_M*_AMS*"
            folders = [os.path.relpath(os.path.normpath(f), input_path)
                       for f in glob.glob(os.path.join(input_path, pat))]
        else:
            run_folder = os.path.basename(os.path.normpath(run_folder))
            assert os.path.exists(os.path.join(input_path, run_folder)), \
                "The specified run folder %s does not seem to exist in the %s folder" % (run_folder, input_path)
            folders = [run_folder]
        
        logger2.info("Will process %s folders: %s" % (len(folders),folders))
        
        # Parse the supplied db of transferred flowcells, or a db in the default location if present
        if transferred_db is None:
            transferred_db = os.path.normpath(config.get("transfer_db",os.path.expanduser(DEFAULT_DB))) 
            assert os.path.exists(transferred_db), "Could not locate transferred_db (expected %s)" % transferred_db
        
        logger2.info("Transferred db is %s" % transferred_db)
        
        # Process each run folder
        for folder in folders:
            
            try:
                
                # Skip this folder if it has already been processed
                logger2.info("Processing %s" % folder)
                if _is_processed(folder, transferred_db) and not force:
                    logger2.info("%s has already been processed, skipping" % folder)
                    continue
            
                # Locate the samplesheet and parse the uppnex id if necessary
                if uppnexid is None:
                    local_samplesheet = samplesheet
                    if local_samplesheet is None:
                        local_samplesheet = os.path.join(input_path, folder,
                                                         config.get("samplesheet_name", DEFAULT_SS_NAME))
                    assert os.path.exists(local_samplesheet), \
                        "Could not find expected sample sheet %s" % local_samplesheet
                    local_uppnexid = _fetch_uppnexid(local_samplesheet,
                                                     config.get("uppnexid_field", DEFAULT_UPPNEXID_FIELD))
                    assert local_uppnexid is not None and len(local_uppnexid) > 0, \
                        "Could not parse Uppnex ID for project from samplesheet %s" % local_samplesheet
                else:
                    local_uppnexid = uppnexid
                    
                logger2.info("Will deliver to inbox of project %s" % local_uppnexid)
                
                # Locate the fastq files to be delivered
                pat = os.path.join(input_path, folder, config.get("fastq_path", DEFAULT_FQ_LOCATION), "*.fastq.gz")
                fq_files = glob.glob(pat)
                # Also search directly in the folder
                pat = os.path.join(input_path, folder, "*.fastq.gz")
                fq_files.extend(glob.glob(pat))
                assert len(fq_files) > 0, "Could not locate fastq files for folder %s using pattern %s" % (folder, pat)

                logger2.info("Found %s fastq files to deliver: %s" % (len(fq_files), fq_files))
                if dryrun:
                    logger2.info("Remember that this is a dry-run. Nothing will be delivered and no directories will be created/changed")
                    
                # Create the destination directory if required
                dest_dir = os.path.normpath(os.path.join(config.get("project_root", DEFAULT_PROJECT_ROOT),
                                                         local_uppnexid, "INBOX", folder, "fastq"))

                _update_processed(folder, transferred_db, dryrun)
                assert _create_destination(dest_dir, dryrun), "Could not create destination %s" % dest_dir
                assert _deliver_files(fq_files, dest_dir, dryrun), "Could not transfer files to destination %s" % dest_dir
                assert _verify_files(fq_files, dest_dir, dryrun), \
                    "Integrity of files in destination directory %s could not be verified. Please investigate" % dest_dir
                assert _set_permissions(dest_dir, dryrun), "Could not change permissions on destination %s" % dest_dir
                
                if email_handler is not None:
                    with email_handler.applicationbound():
                        with logbook.Processor(lambda record: record.extra.__setitem__('run', folder)):
                            logger2.info("The MiSeq sequence data for run %s was successfully delivered "
                                         "to the inbox of Uppnex project %s (%s)" % (folder, local_uppnexid, dest_dir))

            except AssertionError as e:
                logger2.error("Could not deliver data from folder %s. Reason: %s. "
                              "Please fix problems and retry." % (folder, e))
                logger2.info("Rolling back changes to %s" % transferred_db)
                _update_processed(folder, transferred_db, dryrun, True)
Example #20
def create_report_on_gdocs(fc_date, fc_name, run_info_yaml, dirs, config):
    """Create reports on gdocs containing both demultiplexed read counts and QC data.
    """
    success = True
    try:
        # Inject the fc_date and fc_name in the email subject
        def record_processor(record):
            record.extra['run'] = "%s_%s" % (fc_date, fc_name)

        # Parse the run_info.yaml file
        log.debug("Loading this run_info: {}".format(run_info_yaml))
        with open(run_info_yaml, "r") as fh:
            run_info = yaml.safe_load(fh)

        # Get the gdocs account credentials
        encoded_credentials = get_credentials(config)
        if not encoded_credentials:
            log.warn("Could not find Google Docs account credentials in "
                     "configuration. No sequencing report was written")
            return False

        # Get the required parameters from the post_process.yaml configuration file
        gdocs = config.get("gdocs_upload", None)

        # Add email notification
        email = gdocs.get("gdocs_email_notification", None)
        smtp_host = config.get("smtp_host", "")
        smtp_port = config.get("smtp_port", "")
        log_handler = create_log_handler({'email': email, \
                                          'smtp_host': smtp_host, \
                                          'smtp_port': smtp_port}, True)

    except Exception as e:
        success = False
        log.warn(
            "Encountered exception when writing sequencing report to Google Docs: %s"
            % e)

    with log_handler.applicationbound(), logbook.Processor(record_processor):
        try:
            log.info("Started creating sequencing report on Google docs for %s_%s on %s" \
                % (fc_date, fc_name, datetime.datetime.now().isoformat()))

            # Get a flowcell object
            fc = Flowcell(fc_name, fc_date, run_info, dirs.get("work", None))

            # Get the GDocs demultiplex result file title
            gdocs_dmplx_spreadsheet = gdocs.get("gdocs_dmplx_file", None)
            # Get the GDocs QC file title
            gdocs_qc_spreadsheet = gdocs.get("gdocs_qc_file", None)

            # FIXME: Make the bc stuff use the Flowcell module
            if gdocs_dmplx_spreadsheet is not None:
                # Upload the data
                bc_metrics.write_run_report_to_gdocs(fc, fc_date, \
                    fc_name, gdocs_dmplx_spreadsheet, encoded_credentials, append=True)
            else:
                log.warn("Could not find Google Docs demultiplex results file \
                    title in configuration. No demultiplex counts were \
                    written to Google Docs for %s_%s" % (fc_date, fc_name))

            # Parse the QC metrics
            try:
                qc = RTAQCMetrics(dirs.get("flowcell", None))
            except:
                qc = None

            if gdocs_qc_spreadsheet is not None and qc is not None:
                qc_metrics.write_run_report_to_gdocs(fc, qc,
                                                     gdocs_qc_spreadsheet,
                                                     encoded_credentials)
            else:
                log.warn("Could not find Google Docs QC file title in configuration. " \
                         "No QC data were written to Google Docs " \
                         "for %s_%s".format(fc_date, fc_name))

            # Get the projects parent folder
            projects_folder = gdocs.get("gdocs_projects_folder", None)

            # Write the bc project summary report
            if projects_folder is not None:
                create_project_report_on_gdocs(fc, qc, \
                    encoded_credentials, projects_folder)

        except Exception as e:
            success = False
            log.warn("Encountered exception when writing sequencing report " \
                     "to Google Docs: {}".format(e))

        if success:
            log.info("Sequencing report successfully created on Google " \
                     "docs for {}_{} on {}".format(fc_date, fc_name, datetime.datetime.now().isoformat()))
        else:
            log.warn("Encountered exception when writing sequencing " \
                     "report for %s_%s to Google docs on %s" \
                     % (fc_date, fc_name, datetime.datetime.now().isoformat()))

    return success
Example #21
    def _process_test_record(self):
        return logbook.Processor(self._add_current_test)
Example #22
    def wrapped(address, *a):
        with logbook.Processor(inject_address).threadbound():
            return func(address, *a)
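inject_address and func are not shown; presumably wrapped lives inside a decorator whose callback closes over the handled address. A hypothetical reconstruction:

import functools
import logbook

def with_address_logging(func):
    @functools.wraps(func)
    def wrapped(address, *a):
        def inject_address(record):
            record.extra['address'] = address  # assumed extra key
        # Bind the processor to the current thread for the duration of the call.
        with logbook.Processor(inject_address).threadbound():
            return func(address, *a)
    return wrapped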