def assembly_worker(input_queue, 
                    locus_id_value_obj,
                    gene_id_value_obj,
                    tss_id_value_obj,
                    t_id_value_obj,
                    worker_prefix,
                    config):
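    """Worker that assembles transcripts one locus at a time.

    Pulls batches of GTF lines from input_queue (one locus per batch),
    converts them to transcript objects, assigns each transcript a score
    according to config.scoring_mode, and assembles the locus.  Results
    are written to per-worker GTF/BED/bedGraph files (named using
    worker_prefix) depending on the config.create_* flags.  An empty
    batch is the shutdown sentinel.
    """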
    # setup output files
    gtf_fileh = None
    bed_fileh = None
    bedgraph_filehs = [None, None, None]
    if config.create_gtf:
        gtf_fileh = open(worker_prefix + ".gtf", "w")
    if config.create_bed:
        bed_fileh = open(worker_prefix + ".bed", "w")
    if config.create_bedgraph:
        for strand in xrange(0,3):
            filename = '%s_%s.bedgraph' % (worker_prefix, 
                                           STRAND_NAMES[strand])
            fileh = open(filename, 'w')
            bedgraph_filehs[strand] = fileh
    # process input
    while True:
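        # each queue item is a list of GTF lines covering one locus;
        # an empty list is the sentinel that tells the worker to stop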
        lines = input_queue.get()
        if len(lines) == 0:
            break
        transcripts = transcripts_from_gtf_lines(lines)
        # conserve memory
        del lines
        # assign scores to each transcript
        for t in transcripts:
            if config.scoring_mode == "unweighted":
                t.score = 1.0
            elif config.scoring_mode == "gtf_attr":
                score = t.attrs.get(config.gtf_score_attr, '0')
                t.score = float_check_nan(score)
        # assemble
        assemble_locus(transcripts,
                       locus_id_value_obj,
                       gene_id_value_obj,
                       tss_id_value_obj,
                       t_id_value_obj,                       
                       config,
                       gtf_fileh,
                       bed_fileh,
                       bedgraph_filehs)
        input_queue.task_done()
    # cleanup output files
    if config.create_bed:
        bed_fileh.close()
    if config.create_gtf:
        gtf_fileh.close()
    if config.create_bedgraph:
        for fileh in bedgraph_filehs:
            fileh.close()
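    # acknowledge the sentinel batch so a join() on the queue can complete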
    input_queue.task_done()
def annotate_gtf_worker(input_queue, gtf_file, gtf_sample_attr): 
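    """Worker that annotates transcripts one locus at a time.

    Pulls batches of GTF lines from input_queue, converts them to
    transcript objects, annotates the locus using gtf_sample_attr, and
    writes the annotated GTF features to gtf_file.  An empty batch is
    the shutdown sentinel.
    """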
    fileh = open(gtf_file, 'w')
    while True:
        lines = input_queue.get()
        if len(lines) == 0:
            break             
        transcripts = transcripts_from_gtf_lines(lines)
        annotate_locus(transcripts, gtf_sample_attr) 
        for t in transcripts:
            for f in t.to_gtf_features():
                print >>fileh, str(f)
        input_queue.task_done()
        # explicitly delete large objects
        del lines
        del transcripts
    fileh.close()
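    # mark the sentinel batch as done so the producer's queue join can return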
    input_queue.task_done()
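

# A minimal sketch (not part of the original code) of how one of these worker
# functions could be driven.  The function name, chunking, queue size, and
# multiprocessing setup below are illustrative assumptions, not the tool's
# actual driver logic.
def _example_run_annotation(locus_chunks, gtf_file, gtf_sample_attr):
    import multiprocessing
    input_queue = multiprocessing.JoinableQueue(maxsize=100)
    worker = multiprocessing.Process(target=annotate_gtf_worker,
                                     args=(input_queue, gtf_file,
                                           gtf_sample_attr))
    worker.start()
    for lines in locus_chunks:
        # each chunk is the list of GTF lines for one locus
        input_queue.put(lines)
    # an empty list is the sentinel that shuts the worker down
    input_queue.put([])
    # block until every batch (including the sentinel) has been task_done()
    input_queue.join()
    worker.join()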