def loadBatchResults(request):
    """Django view: load cached PubMed batch results for a fixed query.

    Reads ``batchresults/virtual reality.txt`` located next to this module
    and feeds it to ``PubMedParser.load_evidence``.  Responds with an empty
    JSON object on success.

    :param request: Django HTTP request; only GET is supported.
    :returns: ``HttpResponse`` — 200 with ``{}`` for GET, 405 otherwise.
    """
    if request.method != 'GET':
        # The original fell through and implicitly returned None for
        # non-GET requests, which Django rejects at dispatch time.
        # Answer them explicitly instead.
        return HttpResponse(status=status.HTTP_405_METHOD_NOT_ALLOWED)
    print('>> loading batch result request...')  # parenthesized: valid in py2 and py3
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # NOTE(review): the query is hard-coded — confirm this view is meant to
    # load only the 'virtual reality' batch file.
    query = 'virtual reality'
    f = os.path.join(current_dir, 'batchresults', query + '.txt')
    # Positional args (skip_no_abstract=True, 12) mirror the original call;
    # semantics of the third argument are defined by PubMedParser.
    PubMedParser.load_evidence(f, True, 12)
    return HttpResponse(json.dumps({}), status=status.HTTP_200_OK)
def find_neighbors_for_terms(terms, num_neighbors=10, user_id=1):
    """Query PubMed for *terms* and return the top co-occurring keywords.

    Results are cached under ``queryresults/<query>.txt`` (with a matching
    ``_log`` file) next to this module; evidence from the result file is
    also loaded via ``PubMedParser.load_evidence``.

    :param terms: iterable of query term strings.
    :param num_neighbors: how many top keywords to return.
    :param user_id: accepted for interface compatibility; unused here.
    :returns: dict with ``'keywords'`` (top (keyword, count) pairs, highest
        count first) and ``'log'`` (publication counts plus bookkeeping).
    """
    # Join terms with single spaces.  The original built the query with
    # ``+=`` in a loop, leaving a trailing space — which made the cache
    # filename differ from the one find_evidence_for_terms computes for
    # the same terms.  ' '.join keeps both functions consistent.
    query = ' '.join(terms)
    current_dir = os.path.dirname(os.path.realpath(__file__))
    f = os.path.join(current_dir, 'queryresults', query + '.txt')
    logfile = os.path.join(current_dir, 'queryresults', query + '_log' + '.txt')
    query_pubmed(query, f, logfile)
    keywords = PubMedParser.extract_repeated_keywords([f], terms, threshold=10)
    PubMedParser.load_evidence(f)
    # Highest-frequency keywords first.
    sorted_keywords = sorted(keywords.items(), key=operator.itemgetter(1), reverse=True)
    pub_counts = read_counts(logfile)
    pub_counts['keyword_count'] = len(sorted_keywords)
    pub_counts['showing_count'] = num_neighbors
    return {'keywords': sorted_keywords[:num_neighbors], 'log': pub_counts}
def find_evidence_for_terms(terms, skip_no_abstract=False, user_id=1):
    """Query PubMed for *terms* and load the resulting evidence records.

    Results are cached under ``queryresults/<query>.txt`` (with a matching
    ``_log`` file) next to this module.

    :param terms: iterable of query term strings, joined with spaces.
    :param skip_no_abstract: forwarded to ``PubMedParser.load_evidence``.
    :param user_id: accepted for interface compatibility; unused here.
    :returns: whatever ``PubMedParser.load_evidence`` returns.
    """
    # Parenthesized print is valid in both Python 2 and 3; the original
    # print statement is a syntax error under Python 3.
    print('>> finding evidence for terms...')
    query = ' '.join(terms)
    current_dir = os.path.dirname(os.path.realpath(__file__))
    f = os.path.join(current_dir, 'queryresults', query + '.txt')
    logfile = os.path.join(current_dir, 'queryresults', query + '_log' + '.txt')
    query_pubmed(query, f, logfile)
    return PubMedParser.load_evidence(f, skip_no_abstract)