Example #1
0
def main():

    print("poop")

    prefix_500 = "500"
    prefix_1000 = "1000"
    prefix_2000 = "2000"

    suffix_start_count = 1
    suffix_end_count = 10

    design_kmer_list = kmers.get_design_kmers()

    for suffix_count in range(suffix_start_count, suffix_end_count + 1):

        input_file_name = prefix_500 + '_' + str(suffix_count)
        sequence_kmer_list = kmers.get_sequence_kmers(input_file_name)
        mapping_list = BruteForceMapping.get_brute_force_mapping(sequence_kmer_list, design_kmer_list)
        output_seq_kmer_mapping_list(input_file_name, mapping_list)

    for suffix_count in range(suffix_start_count, suffix_end_count + 1):
        input_file_name = prefix_1000 + '_' + str(suffix_count)
        sequence_kmer_list = kmers.get_sequence_kmers(input_file_name)
        mapping_list = BruteForceMapping.get_brute_force_mapping(sequence_kmer_list, design_kmer_list)
        output_seq_kmer_mapping_list(input_file_name, mapping_list)

    for suffix_count in range(suffix_start_count, suffix_end_count + 1):
        input_file_name = prefix_2000 + '_' + str(suffix_count)
        sequence_kmer_list = kmers.get_sequence_kmers(input_file_name)
        mapping_list = BruteForceMapping.get_brute_force_mapping(sequence_kmer_list, design_kmer_list)
        output_seq_kmer_mapping_list(input_file_name, mapping_list)
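These examples call project helpers (kmers, BruteForceMapping, output_seq_kmer_mapping_list) whose definitions are not shown. Purely as an assumed illustration of the idea behind BruteForceMapping.get_brute_force_mapping, a brute-force mapping can pair each observed k-mer with the design k-mer at minimum Hamming distance; the function and parameter names below are hypothetical.

def hamming_distance(a, b):
    # Number of mismatching positions; assumes equal-length k-mers.
    return sum(1 for x, y in zip(a, b) if x != y)


def brute_force_mapping(sequence_kmers, design_kmers):
    # For every observed k-mer, scan all design k-mers and keep the closest
    # one by Hamming distance (O(n * m) comparisons, hence "brute force").
    mapping = []
    for seq_kmer in sequence_kmers:
        best_design = min(design_kmers,
                          key=lambda design: hamming_distance(seq_kmer, design))
        mapping.append((seq_kmer, best_design))
    return mapping


if __name__ == "__main__":
    print(brute_force_mapping(["ACGT", "TTTT"], ["ACGA", "TTTA", "GGGG"]))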
Example #2
0
def main():

    print("poop")

    prefix_500 = "500"
    prefix_1000 = "1000"
    prefix_2000 = "2000"

    suffix_start_count = 1
    suffix_end_count = 10

    design_kmer_list = kmers.get_design_kmers()

    for suffix_count in range(suffix_start_count, suffix_end_count + 1):

        input_file_name = prefix_500 + '_' + str(suffix_count)
        sequence_kmer_list = kmers.get_sequence_kmers(input_file_name)
        mapping_list = BruteForceMapping.get_brute_force_mapping(
            sequence_kmer_list, design_kmer_list)
        output_seq_kmer_mapping_list(input_file_name, mapping_list)

    for suffix_count in range(suffix_start_count, suffix_end_count + 1):
        input_file_name = prefix_1000 + '_' + str(suffix_count)
        sequence_kmer_list = kmers.get_sequence_kmers(input_file_name)
        mapping_list = BruteForceMapping.get_brute_force_mapping(
            sequence_kmer_list, design_kmer_list)
        output_seq_kmer_mapping_list(input_file_name, mapping_list)

    for suffix_count in range(suffix_start_count, suffix_end_count + 1):
        input_file_name = prefix_2000 + '_' + str(suffix_count)
        sequence_kmer_list = kmers.get_sequence_kmers(input_file_name)
        mapping_list = BruteForceMapping.get_brute_force_mapping(
            sequence_kmer_list, design_kmer_list)
        output_seq_kmer_mapping_list(input_file_name, mapping_list)
Example #3
def main():

    prefix_500 = "500"
    prefix_1000 = "1000"
    prefix_2000 = "2000"

    suffix_start_count = 1
    suffix_end_count = 10

    design_kmer_list = kmers.get_design_kmers()

    use_cluster_size_hard_stop = False

    for suffix_count in range(suffix_start_count, suffix_end_count + 1):

        input_file_name = prefix_500 + '_' + str(suffix_count)
        sequence_kmer_list = kmers.get_sequence_kmers(input_file_name)
        cluster_dict = Clustering.get_cluster_dict(sequence_kmer_list,
                                                   design_kmer_list,
                                                   use_cluster_size_hard_stop)
        pickle.dump(
            cluster_dict,
            open("clusters_no_hard_stop" + input_file_name + ".p", "wb"))
        consensus_kmer_list = cluster_dict.keys()
        consensus_mapping_list = get_bipartite_matching(
            consensus_kmer_list, design_kmer_list)
        output_seq_kmer_mapping_list(input_file_name, consensus_mapping_list,
                                     cluster_dict)

    for suffix_count in range(suffix_start_count, suffix_end_count + 1):
        input_file_name = prefix_1000 + '_' + str(suffix_count)
        sequence_kmer_list = kmers.get_sequence_kmers(input_file_name)
        cluster_dict = Clustering.get_cluster_dict(sequence_kmer_list,
                                                   design_kmer_list,
                                                   use_cluster_size_hard_stop)
        pickle.dump(
            cluster_dict,
            open("clusters_no_hard_stop" + input_file_name + ".p", "wb"))
        consensus_kmer_list = cluster_dict.keys()
        consensus_mapping_list = get_bipartite_matching(
            consensus_kmer_list, design_kmer_list)
        output_seq_kmer_mapping_list(input_file_name, consensus_mapping_list,
                                     cluster_dict)

    for suffix_count in range(suffix_start_count, suffix_end_count + 1):
        input_file_name = prefix_2000 + '_' + str(suffix_count)
        sequence_kmer_list = kmers.get_sequence_kmers(input_file_name)
        cluster_dict = Clustering.get_cluster_dict(sequence_kmer_list,
                                                   design_kmer_list,
                                                   use_cluster_size_hard_stop)
        pickle.dump(
            cluster_dict,
            open("clusters_no_hard_stop" + input_file_name + ".p", "wb"))
        consensus_kmer_list = cluster_dict.keys()
        consensus_mapping_list = get_bipartite_matching(
            consensus_kmer_list, design_kmer_list)
        output_seq_kmer_mapping_list(input_file_name, consensus_mapping_list,
                                     cluster_dict)
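A small caveat about the snippet above, independent of the missing helper definitions: pickle.dump(cluster_dict, open(..., "wb")) never closes the file explicitly, and cluster_dict.keys() is a dict view in Python 3 rather than a list. A more defensive version of just that step, using placeholder values for what the loop computes, might look like:

import pickle

# Placeholder values standing in for what the loop above computes.
input_file_name = "500_1"
cluster_dict = {"ACGT": ["ACGA", "ACGG"]}

# Same write as above, but the context manager closes the file deterministically
# and the keys view is materialised into a list before further use.
with open("clusters_no_hard_stop" + input_file_name + ".p", "wb") as handle:
    pickle.dump(cluster_dict, handle)

consensus_kmer_list = list(cluster_dict.keys())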
Example #4
0
def main():

    sequence_kmer_list = kmers.get_random_sequence_kmers(1000)

    design_kmer_list = kmers.get_design_kmers()

    mapping_list = get_bipartite_matching(sequence_kmer_list, design_kmer_list)

    print(len(mapping_list))
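get_bipartite_matching is not defined in these snippets. One standard way to match sequence k-mers to design k-mers one-to-one is minimum-cost bipartite matching (the Hungarian algorithm); the sketch below uses SciPy's linear_sum_assignment over a Hamming-distance cost matrix and is an assumed illustration, not the project's implementation.

import numpy as np
from scipy.optimize import linear_sum_assignment


def bipartite_matching(sequence_kmers, design_kmers):
    # Cost matrix of Hamming distances (assumes equal-length k-mers), then a
    # minimum-cost one-to-one assignment between the two k-mer sets.
    cost = np.array([[sum(a != b for a, b in zip(s, d)) for d in design_kmers]
                     for s in sequence_kmers])
    rows, cols = linear_sum_assignment(cost)
    return [(sequence_kmers[r], design_kmers[c]) for r, c in zip(rows, cols)]


if __name__ == "__main__":
    print(len(bipartite_matching(["ACGT", "TTTT"], ["ACGA", "TTTA", "GGGG"])))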
Example #5
def main():

    sequence_kmer_list = kmers.get_random_sequence_kmers(1000)

    design_kmer_list = kmers.get_design_kmers()

    isClusterSizeHardStop = True

    cluster_dict = get_cluster_dict(sequence_kmer_list, design_kmer_list, isClusterSizeHardStop)
Example #6
def main():

    sequence_kmer_list = kmers.get_random_sequence_kmers(1000)

    design_kmer_list = kmers.get_design_kmers()

    cluster_dict = get_cluster_dict(sequence_kmer_list, design_kmer_list)

    pickle.dump(cluster_dict, open("cluster_dict.p", "wb"))
Example #7
0
def main():

    sequence_kmer_list = kmers.get_random_sequence_kmers(1000)

    design_kmer_list = kmers.get_design_kmers()

    mapping_list = get_bipartite_matching(sequence_kmer_list, design_kmer_list)

    print(len(mapping_list))
Example #8
def main():

    sequence_kmer_list = kmers.get_random_sequence_kmers(1000)

    design_kmer_list = kmers.get_design_kmers()

    cluster_dict = get_cluster_dict(sequence_kmer_list, design_kmer_list)

    pickle.dump(cluster_dict, open("cluster_dict.p", "wb"))
Example #9
def main():

    sequence_kmer_list = kmers.get_random_sequence_kmers(1000)

    design_kmer_list = kmers.get_design_kmers()

    isClusterSizeHardStop = True

    cluster_dict = get_cluster_dict(sequence_kmer_list, design_kmer_list,
                                    isClusterSizeHardStop)
Example #10
0
def main():

    design_kmer_list = kmers.get_design_kmers()

    isClusterSizeHardStop = False

    input_file_name = "500_1"

    sequence_kmer_list = kmers.get_sequence_kmers(input_file_name)

    cluster_dict = get_cluster_dict(sequence_kmer_list, design_kmer_list,
                                    isClusterSizeHardStop)
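get_cluster_dict and the exact meaning of isClusterSizeHardStop are not shown. One plausible reading, stated here only as an assumption, is that sequence k-mers are grouped around their nearest design k-mer and that the hard stop caps how many members a cluster may accept. A minimal sketch under that assumption, with a hypothetical max_cluster_size parameter:

def hamming(a, b):
    return sum(x != y for x, y in zip(a, b))


def cluster_kmers(sequence_kmers, design_kmers, use_hard_stop, max_cluster_size=10):
    # Greedy assignment: each sequence k-mer joins the cluster of its nearest
    # design k-mer; with the hard stop enabled, full clusters reject new members
    # and the k-mer falls through to the next-closest design k-mer.
    clusters = {design: [] for design in design_kmers}
    for kmer in sequence_kmers:
        for design in sorted(design_kmers, key=lambda d: hamming(kmer, d)):
            if not use_hard_stop or len(clusters[design]) < max_cluster_size:
                clusters[design].append(kmer)
                break
    return clusters


if __name__ == "__main__":
    print(cluster_kmers(["ACGA", "ACGG", "TTTA"], ["ACGT", "TTTT"], True,
                        max_cluster_size=2))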
Example #11
0
def main():

    design_kmer_list = kmers.get_design_kmers()

    isClusterSizeHardStop = False

    input_file_name = "500_1"

    sequence_kmer_list = kmers.get_sequence_kmers(input_file_name)

    cluster_dict = get_cluster_dict(sequence_kmer_list,
                                    design_kmer_list,
                                    isClusterSizeHardStop)
Example #12
def main():

    prefix_500 = "500"
    prefix_1000 = "1000"
    prefix_2000 = "2000"

    suffix_start_count = 1
    suffix_end_count = 10

    design_kmer_list = kmers.get_design_kmers()

    use_cluster_size_hard_stop = False

    for suffix_count in range(suffix_start_count, suffix_end_count + 1):

        input_file_name = prefix_500 + '_' + str(suffix_count)
        sequence_kmer_list = kmers.get_sequence_kmers(input_file_name)
        cluster_dict = Clustering.get_cluster_dict(sequence_kmer_list,
                                                   design_kmer_list,
                                                   use_cluster_size_hard_stop)
        pickle.dump(cluster_dict, open("clusters_no_hard_stop" + input_file_name + ".p", "wb"))
        consensus_kmer_list = cluster_dict.keys()
        consensus_mapping_list = get_bipartite_matching(consensus_kmer_list, design_kmer_list)
        output_seq_kmer_mapping_list(input_file_name, consensus_mapping_list, cluster_dict)

    for suffix_count in range(suffix_start_count, suffix_end_count + 1):
        input_file_name = prefix_1000 + '_' + str(suffix_count)
        sequence_kmer_list = kmers.get_sequence_kmers(input_file_name)
        cluster_dict = Clustering.get_cluster_dict(sequence_kmer_list,
                                                   design_kmer_list,
                                                   use_cluster_size_hard_stop)
        pickle.dump(cluster_dict, open("clusters_no_hard_stop" + input_file_name + ".p", "wb"))
        consensus_kmer_list = cluster_dict.keys()
        consensus_mapping_list = get_bipartite_matching(consensus_kmer_list, design_kmer_list)
        output_seq_kmer_mapping_list(input_file_name, consensus_mapping_list, cluster_dict)

    for suffix_count in range(suffix_start_count, suffix_end_count + 1):
        input_file_name = prefix_2000 + '_' + str(suffix_count)
        sequence_kmer_list = kmers.get_sequence_kmers(input_file_name)
        cluster_dict = Clustering.get_cluster_dict(sequence_kmer_list,
                                                   design_kmer_list,
                                                   use_cluster_size_hard_stop)
        pickle.dump(cluster_dict, open("clusters_no_hard_stop" + input_file_name + ".p", "wb"))
        consensus_kmer_list = cluster_dict.keys()
        consensus_mapping_list = get_bipartite_matching(consensus_kmer_list, design_kmer_list)
        output_seq_kmer_mapping_list(input_file_name, consensus_mapping_list, cluster_dict)
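output_seq_kmer_mapping_list is also left undefined across these examples. A plausible but hypothetical behaviour is writing each sequence-to-design pair, plus cluster members when available, to a text file named after the input; all names and the file layout below are assumptions.

def output_seq_kmer_mapping_list(input_file_name, mapping_list, cluster_dict=None):
    # Hypothetical writer: one tab-separated line per (sequence k-mer, design
    # k-mer) pair, with cluster members appended when a cluster_dict is given.
    with open("mapping_" + input_file_name + ".txt", "w") as out:
        for seq_kmer, design_kmer in mapping_list:
            members = cluster_dict.get(seq_kmer, []) if cluster_dict else []
            out.write("\t".join([seq_kmer, design_kmer] + list(members)) + "\n")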