Example #1
0
def main(q1, q2, articles, batch, input_type, outputFileName, dictType):
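    # q1, q2: the two query terms; articles: number of PubMed articles to
    # fetch; batch: "yes"/"no" chooses batch vs. per-article TEES runs;
    # dictType: 'all', 'protein', or 'both' selects which dictionaries are
    # scored; input_type and outputFileName are forwarded to
    # print_pair_score_dict.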
    num_articles = int(articles)
    query = queries.main(q1, q2)

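    # Fetch the PubMed ID lists for both queries and run TEES on them, either
    # in batch mode or one article at a time; each branch yields the lists of
    # interaction-XML file paths used below. batch is expected to be exactly
    # "yes" or "no"; otherwise the *_file_paths names are never bound.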
    if batch == "yes":
        q1_id_list, q2_id_list = pmids.main(query, num_articles)
        q1_file_paths = run_tees_batch(q1, q1_id_list)
        q2_file_paths = run_tees_batch(q2, q2_id_list)

    if batch == "no":
        q1_id_list, q2_id_list = pmids.main(query, num_articles)
        q1_file_paths = run_tees(q1, q1_id_list)
        q2_file_paths = run_tees(q2, q2_id_list)

    q1_dict = get_info_from_interaction_xml(q1_file_paths)
    q2_dict = get_info_from_interaction_xml(q2_file_paths)

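    # Score the two queries with cosine similarity over either the full word
    # dictionaries ('all') or the combined per-query dictionaries built from
    # the interaction XML ('protein'); each result is normalized and written
    # out via print_pair_score_dict.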
    if dictType == 'all' or dictType == 'both':
        all_words_dict = get_all_words_dict(q1, q2, q1_dict, q2_dict)
        normalized_all_words_dict = normalize_dict(all_words_dict, query)
        angle_list_all = Cosine_Sim.main(normalized_all_words_dict, q1, q2)
        print_pair_score_dict(angle_list_all, normalized_all_words_dict, q1,
                              q2, input_type, outputFileName)

    if dictType == 'protein' or dictType == 'both':
        query_dicts = [q1_dict, q2_dict]
        combined_dict = combine_dictionaries(query_dicts)
        normalized_protein_dict = normalize_dict(combined_dict, query)
        angle_list_protein = Cosine_Sim.main(normalized_protein_dict, q1, q2)
        print_pair_score_dict(angle_list_protein, normalized_protein_dict, q1,
                              q2, input_type, outputFileName)
Example #2
0
def main(q1, q2, articles, batch, input_type, outputFileName, dictType):
    num_articles = int(articles)
    query = queries.main(q1, q2)

    if batch == "yes":
        q1_id_list, q2_id_list = pmids.main(query, num_articles)
        q1_file_paths = run_tees_batch(q1, q1_id_list)
        q2_file_paths = run_tees_batch(q2, q2_id_list)

    if batch == "no":
        q1_id_list, q2_id_list = pmids.main(query, num_articles)
        q1_file_paths = run_tees(q1, q1_id_list)
        q2_file_paths = run_tees(q2, q2_id_list)

    q1_dict = get_info_from_interaction_xml(q1_file_paths)
    q2_dict = get_info_from_interaction_xml(q2_file_paths)
    
    if dictType == 'all' or dictType == 'both':
        all_words_dict = get_all_words_dict(q1, q2, q1_dict, q2_dict)
        normalized_all_words_dict = normalize_dict(all_words_dict, query)
        angle_list_all = Cosine_Sim.main(normalized_all_words_dict, q1, q2)
        print_pair_score_dict(angle_list_all, normalized_all_words_dict, q1, q2, input_type, outputFileName)        
    
    if dictType == 'protein' or dictType == 'both':
        query_dicts = [q1_dict, q2_dict]
        combined_dict = combine_dictionaries(query_dicts)
        normalized_protein_dict = normalize_dict(combined_dict, query)
        angle_list_protein = Cosine_Sim.main(normalized_protein_dict, q1, q2)
        print_pair_score_dict(angle_list_protein, normalized_protein_dict, q1, q2, input_type, outputFileName)
Example #3
0
def main(q1, q2, articles):
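    # Fetches the PMID list for the two queries, then runs classify.classify
    # on a single hard-coded PMID with the 'GE11' model; the per-PMID loop and
    # temporary-file handling below remain commented out.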
    id_list = pmids.main(q1, q2, articles)
    #    pmid_xml = pmids_xml.main(q1,q2,articles)
    #   tees_wrapper.main(pmid_xml)
    print id_list
    # The output directory is assumed to be an absolute path (/home/ubuntu/...),
    # matching the paths used in the commented-out per-PMID loop below.
    classify.classify('9668063', 'GE11',
                      '/home/ubuntu/output/one_at_a_time/oneatatime')

    #    for pmid in id_list:
    #	file_path = '/home/ubuntu/output/pmids/%s' % pmid
    #	if os.path.isdir(file_path):
    #	    continue
    #	else:
    #            classify.classify(pmid,'GE11',file_path)

    #    f = tempfile.NamedTemporaryFile()
    #    try:
    #        print 'temp:', temp
    #        print 'temp.name:', temp.name
    #    f.write(pmid_xml)
    print 'a'
Example #4
0
def main(q1, q2, articles):
    id_list = pmids.main(q1, q2, articles)
#    pmid_xml = pmids_xml.main(q1,q2,articles)
#   tees_wrapper.main(pmid_xml)
    print id_list
    # Assumed absolute output path, as in the commented-out loop below.
    classify.classify('9668063', 'GE11', '/home/ubuntu/output/one_at_a_time/oneatatime')

#    for pmid in id_list: 
#	file_path = '/home/ubuntu/output/pmids/%s' % pmid
#	if os.path.isdir(file_path):
#	    continue
#	else:
#            classify.classify(pmid,'GE11',file_path)    


#    f = tempfile.NamedTemporaryFile()
#    try:
#        print 'temp:', temp
#        print 'temp.name:', temp.name
#    f.write(pmid_xml)
    print 'a' 
Example #5
0
def main(q1, q2, articles, batch, input_type, outputFileName, dictType,
         outputType, evaluation_mode, stemmed, model, text_file):
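    # model is a space-separated list of model names passed to the TEES
    # runners; stemmed is forwarded to normalize_dict, evaluation_mode to
    # pmids.main, and text_file to run_tees/run_tees_batch. Returns the angle
    # list, the normalized dictionaries, and the per-query document counts
    # rather than printing the scores directly.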
    models = model.split(' ')
    num_articles = int(articles)
    query = queries.main(q1, q2)
    q1_dict = {}
    q2_dict = {}

    q1_already_downloaded_ids = get_already_downloaded_ids(q1, models)
    q2_already_downloaded_ids = get_already_downloaded_ids(q2, models)
    q1_already_downloaded_file_path_list = get_already_downloaded_file_paths(
        q1, models, num_articles)
    q2_already_downloaded_file_path_list = get_already_downloaded_file_paths(
        q2, models, num_articles)

    q1_already_dl_slice = None
    q2_already_dl_slice = None
    q1_file_paths = None
    q2_file_paths = None

    #     if num_articles <= len(q1_already_downloaded_file_path_list):
    #         q1_already_dl_slice = q1_already_downloaded_file_path_list[:num_articles]
    #         q1_dict = get_info_from_interaction_xml(q1_already_dl_slice)
    #    else:

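    # Reuse already-downloaded TEES output when enough files are on disk
    # (num_articles * 100 is the threshold used here); otherwise fetch the
    # PMID list and run TEES, falling back to the already-downloaded files if
    # the run produces no paths.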
    if num_articles * 100 <= len(q1_already_downloaded_file_path_list):
        q1_already_dl_slice = (
            q1_already_downloaded_file_path_list[:num_articles])
        q1_dict = get_info_from_interaction_xml(q1_already_dl_slice)
    else:
        q1_id_list = pmids.main(query.q1, num_articles, query.q1_search_string,
                                evaluation_mode)
        if len(q1_id_list) == len(q1_already_downloaded_file_path_list):
            q1_dict = get_info_from_interaction_xml(
                q1_already_downloaded_file_path_list)
        else:
            if batch == 'yes':
                q1_file_paths = run_tees_batch(q1, q1_id_list, models,
                                               text_file)
            elif batch == 'no':
                q1_file_paths = run_tees(q1, q1_id_list, models, text_file)
            if not q1_file_paths:
                q1_file_paths = (
                    q1_already_downloaded_file_path_list[:num_articles])
            q1_dict = get_info_from_interaction_xml(q1_file_paths)

    if num_articles * 100 <= len(q2_already_downloaded_file_path_list):
        q2_already_dl_slice = (
            q2_already_downloaded_file_path_list[:num_articles])
        q2_dict = get_info_from_interaction_xml(q2_already_dl_slice)
    else:
        q2_id_list = pmids.main(query.q2, num_articles, query.q2_search_string,
                                evaluation_mode)
        if len(q2_id_list) == len(q2_already_downloaded_file_path_list):
            q2_dict = get_info_from_interaction_xml(
                q2_already_downloaded_file_path_list)
        else:
            if batch == 'yes':
                q2_file_paths = run_tees_batch(q2, q2_id_list, models,
                                               text_file)
            elif batch == 'no':
                q2_file_paths = run_tees(q2, q2_id_list, models, text_file)
            if not q2_file_paths:
                q2_file_paths = (
                    q2_already_downloaded_file_path_list[:num_articles])
            q2_dict = get_info_from_interaction_xml(q2_file_paths)

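    # Record how many documents were actually used for each query, preferring
    # the cached slice, then freshly produced file paths, then everything
    # already downloaded.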
    if q1_already_dl_slice:
        q1_num_docs_processed = len(q1_already_dl_slice)
    elif q1_file_paths:
        q1_num_docs_processed = len(q1_file_paths)
    else:
        q1_num_docs_processed = len(q1_already_downloaded_file_path_list)

    if q2_already_dl_slice:
        q2_num_docs_processed = len(q2_already_dl_slice)
    elif q2_file_paths:
        q2_num_docs_processed = len(q2_file_paths)
    else:
        q2_num_docs_processed = len(q2_already_downloaded_file_path_list)

    print q1, 'num_docs_processed', q1_num_docs_processed
    print q2, 'num_docs_processed', q2_num_docs_processed
    num_docs_processed = [q1_num_docs_processed, q2_num_docs_processed]

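    # Build the requested dictionary ('all' words or 'protein' only),
    # normalize it, and compute the cosine angle; if either query term has no
    # entries the angle defaults to 90.00 (orthogonal). Note that angle_list
    # is only bound when dictType is 'all' or 'protein'.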
    return_dict_s = []
    if dictType == 'all':
        all_words_dict = get_all_words_dict(q1, q2, q1_dict, q2_dict)
        normalized_all_words_dict = normalize_dict(all_words_dict, query,
                                                   stemmed)
        return_dict_s.append(normalized_all_words_dict)
        if len(normalized_all_words_dict[query.q1.lower()]) < 1 or len(
                normalized_all_words_dict[query.q2.lower()]) < 1:
            angle_list = [90.00]
        else:
            angle_list = Cosine_Sim.main(normalized_all_words_dict, q1, q2)

    if dictType == 'protein':
        query_dicts = [q1_dict, q2_dict]
        combined_dict = combine_dictionaries(query_dicts)
        normalized_protein_dict = normalize_dict(combined_dict, query, stemmed)
        return_dict_s.append(normalized_protein_dict)
        if len(normalized_protein_dict[query.q1.lower()]) < 1 or len(
                normalized_protein_dict[query.q2.lower()]) < 1:
            angle_list = [90.00]
        else:
            angle_list = Cosine_Sim.main(normalized_protein_dict, q1, q2)

    return angle_list, return_dict_s, num_docs_processed
Example #6
0
def main(q1, q2, articles, batch, input_type, outputFileName, dictType, outputType, evaluation_mode, stemmed, model, text_file):
    models = model.split(' ')
    num_articles = int(articles)
    query = queries.main(q1, q2)
    q1_dict = {}
    q2_dict = {}

    q1_already_downloaded_ids = get_already_downloaded_ids(q1, models)
    q2_already_downloaded_ids = get_already_downloaded_ids(q2, models)
    q1_already_downloaded_file_path_list = get_already_downloaded_file_paths(q1, models, num_articles)
    q2_already_downloaded_file_path_list = get_already_downloaded_file_paths(q2, models, num_articles)
    
    q1_already_dl_slice = None
    q2_already_dl_slice = None
    q1_file_paths = None
    q2_file_paths = None 
    
    
#     if num_articles <= len(q1_already_downloaded_file_path_list):
#         q1_already_dl_slice = q1_already_downloaded_file_path_list[:num_articles]
#         q1_dict = get_info_from_interaction_xml(q1_already_dl_slice)
#    else:

    if num_articles * 100 <= len(q1_already_downloaded_file_path_list):
        q1_already_dl_slice = q1_already_downloaded_file_path_list[:num_articles]
        q1_dict = get_info_from_interaction_xml(q1_already_dl_slice)
    else:
        q1_id_list = pmids.main(query.q1, num_articles, query.q1_search_string, evaluation_mode)
        if len(q1_id_list) == len(q1_already_downloaded_file_path_list):
            q1_dict = get_info_from_interaction_xml(q1_already_downloaded_file_path_list)
        else:
            if batch == 'yes':
                q1_file_paths = run_tees_batch(q1, q1_id_list, models, text_file)
            elif batch == 'no':
                q1_file_paths = run_tees(q1, q1_id_list, models, text_file)
            if not q1_file_paths:
                q1_file_paths = q1_already_downloaded_file_path_list[:num_articles]
            q1_dict = get_info_from_interaction_xml(q1_file_paths)
    
    if num_articles * 100 <= len(q2_already_downloaded_file_path_list):
        q2_already_dl_slice = q2_already_downloaded_file_path_list[:num_articles]
        q2_dict = get_info_from_interaction_xml(q2_already_dl_slice)
    else:
        q2_id_list = pmids.main(query.q2, num_articles, query.q2_search_string, evaluation_mode)
        if len(q2_id_list) == len(q2_already_downloaded_file_path_list):
            q2_dict = get_info_from_interaction_xml(q2_already_downloaded_file_path_list)
        else:
            if batch == 'yes':
                q2_file_paths = run_tees_batch(q2, q2_id_list, models, text_file)
            elif batch == 'no':
                q2_file_paths = run_tees(q2, q2_id_list, models, text_file)
            if not q2_file_paths:
                q2_file_paths = q2_already_downloaded_file_path_list[:num_articles]
            q2_dict = get_info_from_interaction_xml(q2_file_paths)


    if q1_already_dl_slice:
        q1_num_docs_processed = len(q1_already_dl_slice)
    elif q1_file_paths:
        q1_num_docs_processed = len(q1_file_paths)
    else:
        q1_num_docs_processed = len(q1_already_downloaded_file_path_list)
        
    if q2_already_dl_slice:
        q2_num_docs_processed = len(q2_already_dl_slice)
    elif q2_file_paths:
        q2_num_docs_processed = len(q2_file_paths)
    else:
        q2_num_docs_processed = len(q2_already_downloaded_file_path_list)
        
    print q1, 'num_docs_processed', q1_num_docs_processed
    print q2, 'num_docs_processed', q2_num_docs_processed
    num_docs_processed = [q1_num_docs_processed, q2_num_docs_processed]
    
    return_dict_s = []
    if dictType == 'all':
        all_words_dict = get_all_words_dict(q1, q2, q1_dict, q2_dict)
        normalized_all_words_dict = normalize_dict(all_words_dict, query, stemmed)
        return_dict_s.append(normalized_all_words_dict)
        if len(normalized_all_words_dict[query.q1.lower()]) < 1 or len(normalized_all_words_dict[query.q2.lower()]) < 1:
            angle_list = [90.00]
        else:
            angle_list = Cosine_Sim.main(normalized_all_words_dict, q1, q2)
        
    if dictType == 'protein':
        query_dicts = [q1_dict, q2_dict]
        combined_dict = combine_dictionaries(query_dicts)
        normalized_protein_dict = normalize_dict(combined_dict, query, stemmed)
        return_dict_s.append(normalized_protein_dict)
        if len(normalized_protein_dict[query.q1.lower()]) < 1 or len(normalized_protein_dict[query.q2.lower()]) < 1:
            angle_list = [90.00]
        else:
            angle_list = Cosine_Sim.main(normalized_protein_dict, q1, q2)


    return angle_list, return_dict_s, num_docs_processed