def test_read_contigs_file(self):
    """Parsing the generated-contigs fixture yields 29 contigs whose first
    entry carries the expected id, contig_id and family metadata."""
    cur_dir = os.path.dirname(__file__)
    file_name = os.path.join(cur_dir, "fixtures/generated_contigs_test.fna")
    # Context manager guarantees the fixture file handle is closed even if
    # parsing or an assertion fails (the original leaked the open file).
    with open(file_name, "r") as open_file:
        contigs = read_contigs_file(open_file)
    assert_equal(len(contigs), 29)
    assert_equal(contigs[0].id, "Ehrlichia_canis_Jake_uid58071")
    assert_equal(contigs[0].contig_id, 1000)
    assert_equal(contigs[0].family, "Anaplasmataceae")
def main(contigs_file,taxonomy_file, dir_path, kmer_length, contig_length, algorithm):
    """Score every contig against every genome with k-mer signature models.

    For each genome, a model is fitted on fixed-length parts of its sequence.
    When a contig is scored against its own source genome, the model is refitted
    with the part(s) overlapping the contig held out, so the contig does not
    inflate its own score.  One tab-separated Score row per (contig, genome)
    pair is written to stdout.

    Args:
        contigs_file:  open handle parsed by read_contigs_file (with
                       start_position info).
        taxonomy_file: parsed-taxonomy handle for genome metadata.
        dir_path:      directory containing the genome FASTA files.
        kmer_length:   k used to build the k-mer hash.
        contig_length: length of the sampled contigs, used to find which
                       genome parts a contig overlaps.
        algorithm:     fitting algorithm forwarded to model.fit_nonzero_parameters.
    """
    DNA.generate_kmer_hash(kmer_length)

    contigs = read_contigs_file(contigs_file,start_position=True)

    # Divide genomes into groups, one for each genus
    meta_genomes = genome_info_from_parsed_taxonomy_file(taxonomy_file)

    # Fetch sequence for each genome
    genomes = read_FASTA_files_no_groups(meta_genomes, dir_path)

    genome_part_l = 10000  # length of each genome slice used for fitting
    for genome in genomes:
        genome.calculate_signature()
        genome.parts = genome.split_seq(genome_part_l)
        for part in genome.parts:
            part.calculate_signature()
        genome.pseudo_par = model.fit_nonzero_parameters(
            genome.parts, algorithm=algorithm)

    scores = []
    for contig in contigs:
        contig.calculate_signature()
        for genome in genomes:
            if contig.id == genome.id:
                # Contig originates from this genome: refit with the
                # overlapping part(s) excluded to avoid self-scoring bias.
                s = int(contig.start_position)
                # Floor division is required: plain '/' yields a float under
                # Python 3, and float slice indices raise TypeError below.
                start_part_index = s // genome_part_l
                end_part_index = (s + contig_length) // genome_part_l
                if start_part_index == end_part_index:
                    i = start_part_index
                    temp_pseudo_par = model.fit_nonzero_parameters(
                        genome.parts[0:i] + genome.parts[i+1:],
                        algorithm=algorithm)
                else:
                    # Contig spans multiple parts: drop the whole range.
                    i1 = start_part_index
                    i2 = end_part_index
                    temp_pseudo_par = model.fit_nonzero_parameters(
                        genome.parts[0:i1] + genome.parts[i2+1:],
                        algorithm=algorithm)

                p_val = model.log_probability(contig, temp_pseudo_par)
            else:
                p_val = model.log_probability(contig, genome.pseudo_par)
            scores.append(Score(p_val, contig, genome, contig.contig_id))

    sys.stdout.write("p_value\tcontig_family\tcontig_genus\tcontig_species\tcontig_genome\tcompare_family\tcompare_genus\tcompare_species\tcompare_genome\tcontig_id" + os.linesep)
    for score in scores:
        sys.stdout.write(str(score) + '\n')
def main(contigs_file, taxonomy_file, dir_path, kmer_length, dir_structure, taxonomy_info_in_contigs):
    """Score contigs against genomes using whole-genome signature subtraction.

    Each genome gets a model fitted on its full signature.  When a contig is
    scored against its own source genome, the contig's signature is subtracted
    from a deep copy of the genome before refitting, so the contig is not
    scored against itself.  One tab-separated Score row per (contig, genome)
    pair is written to stdout; the header depends on whether taxonomy
    information is present in the contigs file.

    Args:
        contigs_file:  open handle parsed by read_contigs_file.
        taxonomy_file: parsed-taxonomy handle for genome metadata.
        dir_path:      directory containing the genome FASTA files.
        kmer_length:   k used to build the k-mer hash.
        dir_structure: layout hint forwarded to read_FASTA_files_no_groups.
        taxonomy_info_in_contigs: whether contigs carry taxonomy columns.
    """
    DNA.generate_kmer_hash(kmer_length)

    contigs = read_contigs_file(contigs_file, taxonomy_info=taxonomy_info_in_contigs)

    # Divide genomes into groups, one for each genus
    meta_genomes = genome_info_from_parsed_taxonomy_file(taxonomy_file)

    # Fetch sequence for each genome
    genomes = read_FASTA_files_no_groups(meta_genomes, dir_path, dir_structure=dir_structure)

    for genome in genomes:
        genome.calculate_signature()
        genome.pseudo_par = mn.fit_nonzero_parameters([genome])

    scores = []
    for contig in contigs:
        contig.calculate_signature()
        for genome in genomes:
            if contig.id == genome.id:
                # Remove the contig's own contribution before refitting so
                # the contig is not compared against a model it is part of.
                temp_genome = deepcopy(genome)
                temp_genome.signature.subtract(contig.signature)
                temp_pseudo_par = mn.fit_nonzero_parameters([temp_genome])
                p_val = mn.log_probability(contig, temp_pseudo_par)
            else:
                p_val = mn.log_probability(contig, genome.pseudo_par)
            scores.append(Score(p_val, contig, genome, contig.contig_id, taxonomy_info=taxonomy_info_in_contigs))

    if taxonomy_info_in_contigs:
        sys.stdout.write(
            "p_value\tcontig_family\tcontig_genus\tcontig_species\tcontig_genome\tcompare_family\tcompare_genus\tcompare_species\tcompare_genome\tcontig_id"
            + os.linesep
        )
    else:
        sys.stdout.write(
            "p_value\t\tcontig_genome\tcompare_family\tcompare_genus\tcompare_species\tcompare_genome\tcontig_id"
            + os.linesep
        )
    for score in scores:
        sys.stdout.write(str(score) + "\n")
def main(contigs_file,taxonomy_file, dir_path, kmer_length, contig_length):
    """Score contigs against genome models fitted on genome parts, logging fits.

    Each genome is split into 10 kb parts; a model is fitted on the parts
    (full fitting output is echoed to stderr) and every contig is scored
    against every genome with precomputed pseudo-count arrays.  One
    tab-separated Score row per (contig, genome) pair is written to stdout.

    Args:
        contigs_file:  open handle parsed by read_contigs_file (with
                       start_position info).
        taxonomy_file: parsed-taxonomy handle for genome metadata.
        dir_path:      directory containing the genome FASTA files.
        kmer_length:   k used to build the k-mer hash.
        contig_length: accepted for interface compatibility; not used here.
    """
    DNA.generate_kmer_hash(kmer_length)

    contigs = read_contigs_file(contigs_file,start_position=True)

    # Divide genomes into groups, one for each genus
    meta_genomes = genome_info_from_parsed_taxonomy_file(taxonomy_file)

    # Fetch sequence for each genome
    genomes = read_FASTA_files_no_groups(meta_genomes, dir_path)

    genome_part_l = 10000  # length of each genome slice used for fitting
    for genome in genomes:
        genome.calculate_signature()
        genome.parts = genome.split_seq(genome_part_l)
        for part in genome.parts:
            part.calculate_signature()
        # Keep the full fitting output for diagnostics on stderr; only the
        # first element (the fitted parameters) is used downstream.
        alpha_fit = model.fit_nonzero_parameters_full_output(genome.parts)
        sys.stderr.write(str(alpha_fit)+'\n')
        genome.pseudo_par = alpha_fit[0]

    scores = []
    for contig in contigs:
        contig.calculate_signature()
        # Precompute the pseudo-count row vector once per contig so
        # log_probability can skip recomputation for every genome.
        contig.pseudo_counts_array = np.fromiter(contig.pseudo_counts,np.dtype('u4'),DNA.kmer_hash_count).reshape((1,DNA.kmer_hash_count))
        for genome in genomes:
            p_val = model.log_probability(
                contig, genome.pseudo_par, pseudo_counts_supplied=True)
            scores.append(Score(p_val, contig, genome, contig.contig_id))

    sys.stdout.write("p_value\tcontig_family\tcontig_genus\tcontig_species\tcontig_genome\tcompare_family\tcompare_genus\tcompare_species\tcompare_genome\tcontig_id" + os.linesep)
    for score in scores:
        sys.stdout.write(str(score) + '\n')
def main(contigs_file,contig_time_series_file, genome_time_series_file, taxonomy_file,dir_path, contig_length, total_read_count,assembly_length,first_data,last_data):
    """Score contigs against genomes using dinucleotide models plus
    read-coverage time series, writing one Score row per pair to stdout.

    Raises:
        TypeError: if the contig count does not match the number of rows in
            the contig time-series file.
    """
    DNA.generate_kmer_hash(2)

    contigs = read_contigs_file(contigs_file,start_position=True)

    contig_time_series_df = read_time_series(contig_time_series_file)

    if len(contig_time_series_df.index) != len(contigs):
        raise TypeError("The number of contigs and time series does not match")

    # Attach the matching time-series rows to each contig.
    for contig in contigs:
        row_mask = contig_time_series_df.contig_id == contig.contig_id
        contig.mapping_reads = contig_time_series_df[row_mask]

    # Divide genomes into groups, one for each genus
    meta_genomes = genome_info_from_parsed_taxonomy_file(taxonomy_file)

    # Fetch sequence for each genome
    genomes = read_FASTA_files_no_groups(meta_genomes, dir_path)

    # Fetch time series for each genome
    read_time_series_file_genomes(genomes, genome_time_series_file)

    for genome in genomes:
        genome.pseudo_par = model.fit_nonzero_parameters([genome],total_read_count)

    # One score per (contig, genome) pair, contigs in outer order.
    scores = [
        Score(
            model.log_probability(
                contig, genome.pseudo_par, total_read_count,assembly_length),
            contig, genome, contig.contig_id)
        for contig in contigs
        for genome in genomes
    ]

    sys.stdout.write("p_value\tcontig_family\tcontig_genus\tcontig_species\tcontig_genome\tcompare_family\tcompare_genus\tcompare_species\tcompare_genome\tcontig_id" + os.linesep)
    for score in scores:
        sys.stdout.write(str(score) + '\n')