Example #1
        # Queue a SNP-processing job, parented to this job's parent, for each
        # cached sequence of the amplicon.
        for cseq in amp.cached_sequences:
            job_queue.add(JOB_ID_PROCESS_SNPS, ProcessSNPMessage(cached_sequence_id=cseq.id),
                          parent_job=job.parent)

    # All child jobs are queued; mark this job finished (no result payload).
    job_queue.finish(job, None)


class AmpliconWorker(PasterDaemonContextProcess):
    def run(self, config_path, as_daemon=False):
        global process_assay_job, process_amplicon_job

        mgr             = get_manager(config_path)
        jobqueue        = mgr.jobqueue()
        sequence_source = mgr.sequence_source()
        tm_calc         = mgr.tm_calc(mgr)
        dg_calc         = mgr.dg_calc(mgr)

        # does the thread knock it out?
        assay_thread    = LogExcRepeatedThread(10, process_assay_job, LOGGER_NAME, jobqueue, tm_calc, dg_calc)
        sequence_thread = LogExcRepeatedThread(15, process_amplicon_job, LOGGER_NAME, jobqueue, sequence_source, dg_calc)
        if as_daemon:
            assay_thread.daemon = True
            sequence_thread.daemon = True

        assay_thread.start()
        sequence_thread.start()

if __name__ == "__main__":
    worker = PasterLikeProcess('amplicons.pid')
    worker.run(AmpliconWorker)
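
Each worker in these examples builds its threads from LogExcRepeatedThread, whose implementation is not included here. Judging only from the call sites (interval in seconds, job function, logger name, then arguments forwarded to the job function), it is assumed to run the function repeatedly and log exceptions instead of letting them kill the thread. A minimal sketch of that assumed behaviour; RepeatedJobThreadSketch is a hypothetical stand-in, not the project's class:

import logging
import threading
import time

class RepeatedJobThreadSketch(threading.Thread):
    """Hypothetical stand-in: call func(*args) every `interval` seconds,
    logging any exception under `logger_name` so the loop keeps running."""
    def __init__(self, interval, func, logger_name, *args):
        threading.Thread.__init__(self)
        self.interval = interval
        self.func = func
        self.args = args
        self.logger = logging.getLogger(logger_name)

    def run(self):
        while True:
            try:
                self.func(*self.args)
            except Exception:
                self.logger.exception("Unhandled error in %s", getattr(self.func, '__name__', 'job'))
            time.sleep(self.interval)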
Example #2
                logger.exception("Error from Reprocess worker [job %s]: Non-zero result status (%d)" % (job.id, result))
                job_queue.abort(job, JSONErrorMessage("Unable to add to qtools: Non-zero result status (%d)" % result))
                continue

 
    # Important: close the session before returning; otherwise the worker
    # leaks connections and eventually exhausts the SQL connection pool.
    Session.close()


class ReprocessWorker(PasterDaemonContextProcess):
    def run(self, config_path, as_daemon=False):
        global process_reprocess_job

        mgr = get_manager(config_path)
        
        jobqueue = mgr.jobqueue()

        config = mgr.pylons_config

        # TODO add as runtime argument
        reprocess_thread = LogExcRepeatedThread(10, process_reprocess_job, LOGGER_NAME, jobqueue, config)

        if as_daemon:
            reprocess_thread.daemon = True
        
        reprocess_thread.start()

if __name__ == "__main__":
    worker = PasterLikeProcess('reprocess.pid')
    worker.run(ReprocessWorker)
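
The Session.close() calls in these fragments are what return each worker's database connection to the pool. As a defensive variant, the cleanup can be made unconditional with try/finally; the sketch below is illustrative only, and drain_jobs_safely, pending_jobs and handle_job are hypothetical names, since the full job loop is not shown in these excerpts:

def drain_jobs_safely(pending_jobs, handle_job, session):
    # Process each pending job, guaranteeing the session is closed (and its
    # pooled SQL connection released) even if a handler raises.
    try:
        for job in pending_jobs:
            handle_job(job)
    finally:
        session.close()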
Example #3
                    start = snp['chromStart']
                else:
                    start = snp['chromStart']+1
                end = snp['chromEnd']
                message = ProcessSNPAmpliconMessage(sequence_group_id, chromosome, start, end)
                job_queue.add(JOB_ID_PROCESS_SNP_AMPLICON, message, parent_job=job)
            
            # TODO: does this need to run inside a transaction?
            job_queue.progress(job)
    
    Session.close()


class SNPWorker(PasterDaemonContextProcess):
    def run(self, config_path, as_daemon=False):
        global process_snp_job

        snp_table = 'snp131'
        mgr = get_manager(config_path)
        jobqueue = mgr.jobqueue()
        # TODO add as runtime argument
        snp_source = mgr.snp_source(snp_table=snp_table)
        snp_thread = LogExcRepeatedThread(10, process_snp_job, LOGGER_NAME, jobqueue, snp_source, snp_table)
        if as_daemon:
            snp_thread.daemon = True
        
        snp_thread.start()

if __name__ == "__main__":
    worker = PasterLikeProcess('snps.pid')
    worker.run(SNPWorker)
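
All three workers share the same shape: resolve dependencies from get_manager(config_path), wrap each job-processing function in a LogExcRepeatedThread, optionally daemonize the threads, and start them. The sketch below makes that shared pattern explicit; GenericWorkerSketch and THREAD_SPECS are hypothetical names, and only the classes and calls already used above are assumed to exist:

class GenericWorkerSketch(PasterDaemonContextProcess):
    # Each entry: (poll interval in seconds, job function, extra thread args).
    THREAD_SPECS = []

    def run(self, config_path, as_daemon=False):
        mgr = get_manager(config_path)
        jobqueue = mgr.jobqueue()
        for interval, func, extra_args in self.THREAD_SPECS:
            thread = LogExcRepeatedThread(interval, func, LOGGER_NAME, jobqueue, *extra_args)
            if as_daemon:
                thread.daemon = True
            thread.start()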