Example #1
0
def process_taqman_job(job, job_queue, sequence_source, dg_calc):
    """Process a queued TaqMan assay job.

    Loads the forward/reverse primers and probes referenced by the job's
    input message, asks ``sequence_source`` for the PCR sequences bounded by
    the primer pair, builds amplicon records (with delta-G data), then fans
    out one SNP-processing child job per cached sequence before finishing.

    :param job: queue job; ``input_message`` carries sequence_group_id,
        forward_primer_id, reverse_primer_id and probe_ids.
    :param job_queue: job queue used to abort/add/finish jobs.
    :param sequence_source: provider exposing ``sequences_for_primers``.
    :param dg_calc: delta-G calculator handed to ``populate_amplicon_dgs``.
    """
    logger = logging.getLogger(LOGGER_NAME)

    struct            = JSONMessage.unserialize(job.input_message)
    forward_primer_id = struct.forward_primer_id
    reverse_primer_id = struct.reverse_primer_id
    probe_ids         = struct.probe_ids

    sequence_group    = Session.query(SequenceGroup).get(struct.sequence_group_id)
    # NOTE: each component's .sequence is itself an object with a .sequence
    # string attribute (see the sequences_for_primers call below).
    fp_sequence       = Session.query(SequenceGroupComponent).get(forward_primer_id).sequence
    rp_sequence       = Session.query(SequenceGroupComponent).get(reverse_primer_id).sequence
    probes            = Session.query(SequenceGroupComponent).filter(SequenceGroupComponent.id.in_(probe_ids)).all()
    probe_seqs        = [p.sequence for p in probes]

    sequences = sequence_source.sequences_for_primers(fp_sequence.sequence, rp_sequence.sequence,
                                                      fwd_prefix_length=MAX_CACHE_PADDING,
                                                      rev_suffix_length=MAX_CACHE_PADDING)

    if sequences is None:
        # Lazy %-args: message is only formatted if the record is emitted.
        logger.error("Amplicon TaqMan: Could not get response from server [job %s]", job.id)
        job_queue.abort(job, JSONErrorMessage('Could not get response from server.'))
        Session.commit()
        return

    amplicons = create_amplicons_from_pcr_sequences(sequence_group,
                                                    forward_primer=fp_sequence,
                                                    reverse_primer=rp_sequence,
                                                    probes=probe_seqs,
                                                    pcr_sequences=sequences)

    for amp in amplicons:
        populate_amplicon_dgs(amp, dg_calc)

    logger.info("Taqman job completed [job %s]", job.id)
    Session.commit()

    # TODO: add finish message

    # now add SNPs to cached sequences
    for amp in amplicons:
        for cseq in amp.cached_sequences:
            job_queue.add(JOB_ID_PROCESS_SNPS, ProcessSNPMessage(cached_sequence_id=cseq.id),
                          parent_job=job.parent)

    # avoid condition where job completed -- clear child job first
    job_queue.finish(job, None)
Example #2
0
def process_location_job(job, job_queue, sequence_source, dg_calc):
    """Process a queued amplicon-by-location job.

    Fetches the genomic sequence around the sequence group's configured
    chromosome/base location, builds amplicon records (with delta-G data),
    then fans out one SNP-processing child job per cached sequence before
    finishing.

    :param job: queue job; ``input_message`` carries sequence_group_id.
    :param job_queue: job queue used to abort/add/finish jobs.
    :param sequence_source: provider exposing ``sequence_around_loc``.
    :param dg_calc: delta-G calculator handed to ``populate_amplicon_dgs``.
    """
    logger = logging.getLogger(LOGGER_NAME)

    struct         = JSONMessage.unserialize(job.input_message)

    sequence_group = Session.query(SequenceGroup).get(struct.sequence_group_id)
    sequence = None
    try:
        sequence = sequence_source.sequence_around_loc(sequence_group.location_chromosome,
                                                       sequence_group.location_base,
                                                       sequence_group.amplicon_length,
                                                       prefix_length=MAX_CACHE_PADDING,
                                                       suffix_length=MAX_CACHE_PADDING)
    except Exception:
        # logger.exception records the traceback; lazy %-args for formatting.
        logger.exception("Could not retrieve sequence for assay location [job %s]", job.id)
        job_queue.abort(job, JSONErrorMessage('Could not retrieve the sequence for the specified amplicon location.'))
        Session.commit()
        return

    if sequence is None:
        logger.error("Amplicon Location: could not get response from server [job %s]", job.id)
        job_queue.abort(job, JSONErrorMessage('Could not get response from server.'))
        Session.commit()
        return

    amplicons = create_amplicons_from_pcr_sequences(sequence_group,
                                                    pcr_sequences=[sequence])

    for amp in amplicons:
        populate_amplicon_dgs(amp, dg_calc)

    logger.info("Location job completed [job %s]", job.id)
    Session.commit()

    # fan out SNP processing for every cached sequence of every amplicon
    for amp in amplicons:
        for cseq in amp.cached_sequences:
            job_queue.add(JOB_ID_PROCESS_SNPS, ProcessSNPMessage(cached_sequence_id=cseq.id),
                          parent_job=job.parent)

    job_queue.finish(job, None)
Example #3
0
def process_snp_job(job, job_queue, sequence_source, dg_calc):
    """Process a queued SNP assay job.

    Fetches the genomic sequence around the SNP region named in the job's
    input message, builds amplicon records (with delta-G data), then fans
    out one SNP-processing child job per cached sequence before finishing.

    :param job: queue job; ``input_message`` carries sequence_group_id,
        chromosome, start and end.
    :param job_queue: job queue used to abort/add/finish jobs.
    :param sequence_source: provider exposing ``sequence_around_region``.
    :param dg_calc: delta-G calculator handed to ``populate_amplicon_dgs``.
    """
    logger = logging.getLogger(LOGGER_NAME)

    struct         = JSONMessage.unserialize(job.input_message)
    sequence_group = Session.query(SequenceGroup).get(struct.sequence_group_id)
    chromosome     = struct.chromosome
    snp_start      = struct.start
    snp_end        = struct.end

    sequence = None
    try:
        sequence = sequence_source.sequence_around_region(chromosome, snp_start, snp_end,
                                                          sequence_group.amplicon_length,
                                                          prefix_length=MAX_CACHE_PADDING,
                                                          suffix_length=MAX_CACHE_PADDING)
    except Exception:
        # logger.exception records the traceback; lazy %-args for formatting.
        logger.exception("Could not retrieve sequence around SNP location for [job %s]", job.id)
        job_queue.abort(job, JSONErrorMessage("Could not retrieve sequence around SNP location."))
        Session.commit()
        return

    if sequence is None:
        logger.error("Amplicon SNP: could not get response from server [job %s]", job.id)
        job_queue.abort(job, JSONErrorMessage('Could not get response from server.'))
        Session.commit()
        return

    amplicons = create_amplicons_from_pcr_sequences(sequence_group,
                                                    pcr_sequences=[sequence])

    # Populate delta-G data as the sibling taqman/location handlers do;
    # previously dg_calc was accepted but never used here.
    for amp in amplicons:
        populate_amplicon_dgs(amp, dg_calc)

    logger.info("SNP assay job completed [job %s]", job.id)
    Session.commit()

    # fan out SNP processing for every cached sequence of every amplicon
    for amp in amplicons:
        for cseq in amp.cached_sequences:
            job_queue.add(JOB_ID_PROCESS_SNPS, ProcessSNPMessage(cached_sequence_id=cseq.id),
                          parent_job=job.parent)

    job_queue.finish(job, None)