# Standard-library imports used below. Project-local helpers
# (InvertedIndex, CranFile, QueryProcessor, loadCranQry, preprocess_doc,
# class_def_helper, index_newsgroups) are assumed to be defined or
# imported elsewhere in this module.
from math import log10
from os import listdir, sep, walk
from sys import argv


def main():
    #########
    # SETUP #
    #########
    # Get input args
    newsgroups_root_dir = argv[1]
    feat_def_path = argv[2]
    class_def_path = argv[3]
    training_data_path = argv[4]

    # Generate index
    #index_newsgroups(newsgroups_root_dir, "idx_save.pkl")
    ii = InvertedIndex()
    ii.load("idx_save.pkl")

    # Write out feature/term pairs to feat_def_path
    feature_id = 0
    with open(feat_def_path, 'w') as outf:
        for item in ii.items:
            outf.write(str(feature_id) + " " + str(item) + "\n")
            feature_id += 1

    # Read back in the feature/term pairs for later
    with open(feat_def_path, 'r') as inf:
        ft_pairs = inf.readlines()

    # Put the ft_pairs into a dictionary for quick lookup (term -> feature ID)
    ft_dict = {}
    for pair in ft_pairs:
        ft_dict[pair.split()[1].strip()] = pair.split()[0]

    # Map the different newsgroups to a given class
    # This is fairly manual...
    with open(class_def_path, 'w') as outf:
        for dir in listdir(newsgroups_root_dir):
            outf.write(class_def_helper(dir) + " " + dir + "\n")

    ############################
    # TRAINING DATA GENERATION #
    ############################
    # Create the training data
    # For each document:
    #   Find its containing folder, and extract its class from the class def
    #   For each term in the document, compute tf-idf, tf, or idf
    current_file_id = 1
    with open(training_data_path + ".TFIDF", 'w') as outf:
        # Compute tf-idf
        # Go through each document in the newsgroups dir
        for root, _, files in walk(newsgroups_root_dir):
            # Find and write out the class label
            local_dir = root.split(sep)[-1]

            # For each file...
            for file in files:
                outf.write(class_def_helper(local_dir) + " ")
                print(root, file)

                # Get the words from the doc
                stemmed_token_list = preprocess_doc(root + sep + file)

                # Put all the info into a set (for uniqueness)
                data_set = set()

                # Compute the tf-idf weight for each unique term
                for word in stemmed_token_list:
                    # Skip blank stopwords
                    if word == "":
                        continue

                    # Get the term ID
                    #outf.write(ft_dict[word] + ":")

                    # Calculate and write out TF-IDF
                    # Note current_file_id is our doc_id
                    tf = ii.find(word).posting[current_file_id].term_freq()
                    idf = ii.idf(word)
                    #outf.write(str(log10(1 + tf) * idf) + " ")
                    data_set.add(ft_dict[word] + ":" + str(log10(1 + tf) * idf))

                # Write newline to signify end of file
                #outf.write("\n")
                outf.write(" ".join(
                    sorted(data_set, key=lambda x: int(x.split(':')[0]))) + "\n")
                outf.flush()

                # Increment our current doc
                current_file_id += 1

    current_file_id = 1
    with open(training_data_path + ".TF", 'w') as outf:
        # Compute tf
        # Go through each document in the newsgroups dir
        for root, _, files in walk(newsgroups_root_dir):
            # Find and write out the class label
            local_dir = root.split(sep)[-1]

            # For each file...
            for file in files:
                outf.write(class_def_helper(local_dir) + " ")
                print(root, file)

                # Get the words from the doc
                stemmed_token_list = preprocess_doc(root + sep + file)

                # Put all the info into a set (for uniqueness)
                data_set = set()

                # Look up the tf for each unique term
                for word in stemmed_token_list:
                    # Skip blank stopwords
                    if word == "":
                        continue

                    # Get the term ID
                    #outf.write(ft_dict[word] + ":")

                    # Write the TF
                    # Note current_file_id is our doc_id
                    # outf.write(str(ii.find(word).posting[
                    #     current_file_id].term_freq()) + " ")
                    data_set.add(ft_dict[word] + ":" + str(
                        ii.find(word).posting[current_file_id].term_freq()))

                # Write newline to signify end of file
                # outf.write("\n")
                outf.write(" ".join(
                    sorted(data_set, key=lambda x: int(x.split(':')[0]))) + "\n")
                # outf.flush()

                # Increment our current doc
                current_file_id += 1

    current_file_id = 1
    with open(training_data_path + ".IDF", 'w') as outf:
        # Compute idf
        # Go through each document in the newsgroups dir
        for root, _, files in walk(newsgroups_root_dir):
            # Find and write out the class label
            local_dir = root.split(sep)[-1]

            # For each file...
            for file in files:
                outf.write(class_def_helper(local_dir) + " ")
                print(root, file)

                # Get the words from the doc
                stemmed_token_list = preprocess_doc(root + sep + file)

                # Put all the info into a set (for uniqueness)
                data_set = set()

                # Now that we've re-done all that, find idfs
                for word in stemmed_token_list:
                    # Skip blank stopwords
                    if word == "":
                        continue

                    # Get the term ID
                    #outf.write(ft_dict[word] + ":" + str(ii.idf(word))
                    #           + " ")
                    data_set.add(ft_dict[word] + ":" + str(ii.idf(word)))

                # Write newline to signify end of file
                outf.write(" ".join(
                    sorted(data_set, key=lambda x: int(x.split(':')[0]))) + "\n")
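
# Usage sketch for main() above (not part of the original module; the script
# name is an assumption made for illustration):
#
#   python feature_gen.py <newsgroups_root_dir> <feat_def_path> \
#       <class_def_path> <training_data_path>
#
# This writes <training_data_path>.TFIDF, .TF, and .IDF, one line per
# document, in a LIBSVM-style "label feat_id:value ..." format. The TF-IDF
# weight used above is log10(1 + tf) * idf; e.g., with tf = 3 and idf = 2,
# the feature value is log10(1 + 3) * 2 ~= 1.204.
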
def test(index_loc, cran_loc, qrels_loc):
    ''' test your code thoroughly. put the testing cases here'''
    ##### SETUP ITEMS #####
    # Grab index file to restore II
    ii = InvertedIndex()
    ii.load(index_loc)

    # Get the document collection
    cf = CranFile(cran_loc)

    # Get ground-truth results from qrels.txt
    with open(qrels_loc) as f:
        qrels = f.readlines()

    # Index qrels into a dict: query ID -> list of relevant doc IDs
    qrel_dict = {}
    for qrel in qrels:
        qrel_split = qrel.split()
        if int(qrel_split[0]) in qrel_dict:
            qrel_dict[int(qrel_split[0])].append(int(qrel_split[1]))
        else:
            qrel_dict[int(qrel_split[0])] = [int(qrel_split[1])]

    ##### INITIAL TEST ITEMS #####
    print("TESTS BASED ON SUGGESTED TESTING POINTS")

    # Ensure tf is correct
    # Find a random word and check the TF value against what is manually done
    posting_list = ii.find("experiment").posting
    tf_vector = []
    for posting in posting_list:
        tf_vector.append(len(posting_list[posting].positions)
                         == posting_list[posting].term_freq())
    print("TF is computed correctly:", all(tf_vector))

    # Ensure idf is correct
    print("IDF is computed correctly:",
          log10(ii.nDocs / len(posting_list)) == ii.idf("experiment"))

    # As both tf and idf are correct, and tf-idf is a product of the two,
    # it is reasonable to assume tf-idf is computed correctly

    ##### BOOL QUERY TESTS #####
    # Here, I use very specific boolean queries to ensure that a
    # limited number of documents are returned
    print("\nBOOL QUERY TESTS")

    # Ensure that the exact title of doc 8 matches for doc 8
    doc8 = "measurements of the effect of two-dimensional and " \
           "three-dimensional roughness elements on boundary layer transition"
    qp1 = QueryProcessor(doc8, ii, cf)
    print("Bool query matches on exact title:", qp1.booleanQuery() == [8])

    # Ensure that bool query matches a very specific AND query
    qp2 = QueryProcessor("hugoniot and infinitesimally", ii, cf)
    print("Bool query matches on specific AND query "
          "('hugoniot and infinitesimally'):",
          qp2.booleanQuery() == [329])

    # Test that an OR query is handled properly
    # "gravel" and "stagnation" have disjoint postings lists,
    # so OR should merge them.
    gravel_postings = ii.find("gravel").sorted_postings[:]
    stag_postings = ii.find("stagnat").sorted_postings[:]
    gravel_postings.extend(stag_postings)
    qp3 = QueryProcessor("gravel or stagnation", ii, cf)
    print("Bool query successfully handles OR ('gravel or stagnation'):",
          qp3.booleanQuery() == sorted(gravel_postings))

    # Test that NOT is handled properly
    # The posting list for "diameter" is a subset of "slipstream" postings
    # (oddly enough). To test this works, do "slipstream and not diameter"
    # and we should get slipstream's postings minus those of diameter.
    slip_postings = ii.find("slipstream").sorted_postings[:]
    diam_postings = ii.find("diamet").sorted_postings[:]
    slip_not_diam = [t for t in slip_postings if t not in diam_postings]
    print("Bool query successfully handles NOT "
          "('slipstream and not diameter'):",
          QueryProcessor("slipstream and not diameter", ii, cf).booleanQuery()
          == slip_not_diam)

    # Ensure AND/OR order doesn't matter
    print("Bool query can handle query regardless of AND order "
          "('a and b' = 'b and a'):",
          QueryProcessor("slipstream and diameter", ii, cf).booleanQuery()
          == QueryProcessor("diameter and slipstream", ii, cf).booleanQuery())
    print("Bool query can handle query regardless of OR order "
          "('a or b' = 'b or a'):",
          QueryProcessor("slipstream or diameter", ii, cf).booleanQuery()
          == QueryProcessor("diameter or slipstream", ii, cf).booleanQuery())

    # Ensure that the presence of parens does not change query results
    print("Bool query can handle query regardless of parens "
          "('slipstream and diameter'):",
          QueryProcessor("slipstream and diameter", ii, cf).booleanQuery()
          == QueryProcessor("(slipstream and diameter)", ii, cf).booleanQuery())

    # Ensure parentheses do not change order of processing for
    # AND-AND and OR-OR queries
    print("Bool query AND is associative ('(a and b) and c' = 'a and (b and c)'):",
          QueryProcessor("(slipstream and diameter) and thrust",
                         ii, cf).booleanQuery()
          == QueryProcessor("slipstream and (diameter and thrust)",
                            ii, cf).booleanQuery())
    print("Bool query OR is associative ('(a or b) or c' = 'a or (b or c)'):",
          QueryProcessor("(slipstream or diameter) or thrust",
                         ii, cf).booleanQuery()
          == QueryProcessor("slipstream or (diameter or thrust)",
                            ii, cf).booleanQuery())

    # Ensure parentheses properly group items
    # Tested by doing the query "manually" by ANDing/ORing the correct terms
    part_one = QueryProcessor("conduction and cylinder and gas",
                              ii, cf).booleanQuery()
    part_two = QueryProcessor("radiation and gas", ii, cf).booleanQuery()
    part_one.extend(part_two)
    expected_result = QueryProcessor("hugoniot", ii, cf).booleanQuery()
    expected_result.extend(part_one)
    print("Bool query parens successfully group conflicting operators:",
          QueryProcessor(
              "(conduction and cylinder and gas) or (radiation and gas) "
              "or hugoniot", ii, cf).booleanQuery()
          == sorted(list(set(expected_result))))

    ##### VECTOR QUERY TESTS #####
    # For these, just ensure that most of the results are in the expected list
    print("\nVECTOR QUERY TESTS")

    # Ensure vector query can match on exact title
    print("Vector query matches on exact title:",
          qp1.vectorQuery(1)[0][0] == 8)

    # Try a few example queries from query.text
    # As long as one-fifth of the top-10 results are in gt_result, call it
    # a pass. Note that queries with larger answer sets were chosen to
    # ensure there were enough relevant documents to reach one-fifth of ten.
    qc = loadCranQry("query.text")
    poss_queries = list(qc)

    # Query 001
    result = QueryProcessor(qc["001"].text, ii, cf).vectorQuery(10)
    gt_result = qrel_dict[poss_queries.index("001") + 1]
    correct_vector = list(map(lambda x: x in gt_result,
                              [x[0] for x in result]))
    print("Vector query is at least one-fifth correct for query 001:",
          sum(correct_vector) >= 2)

    # Query 128
    result = QueryProcessor(qc["128"].text, ii, cf).vectorQuery(10)
    gt_result = qrel_dict[poss_queries.index("128") + 1]
    correct_vector = list(map(lambda x: x in gt_result,
                              [x[0] for x in result]))
    print("Vector query is at least one-fifth correct for query 128:",
          sum(correct_vector) >= 2)

    # Query 226
    result = QueryProcessor(qc["226"].text, ii, cf).vectorQuery(10)
    gt_result = qrel_dict[poss_queries.index("226") + 1]
    correct_vector = list(map(lambda x: x in gt_result,
                              [x[0] for x in result]))
    print("Vector query is at least one-fifth correct for query 226:",
          sum(correct_vector) >= 2)

    # Query 196
    result = QueryProcessor(qc["196"].text, ii, cf).vectorQuery(10)
    gt_result = qrel_dict[poss_queries.index("196") + 1]
    correct_vector = list(map(lambda x: x in gt_result,
                              [x[0] for x in result]))
    print("Vector query is at least one-fifth correct for query 196:",
          sum(correct_vector) >= 2)

    # Query 291
    result = QueryProcessor(qc["291"].text, ii, cf).vectorQuery(10)
    gt_result = qrel_dict[poss_queries.index("291") + 1]
    correct_vector = list(map(lambda x: x in gt_result,
                              [x[0] for x in result]))
    print("Vector query is at least one-fifth correct for query 291:",
          sum(correct_vector) >= 2)
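
# Minimal driver sketch, not in the original module: runs the test suite.
# "idx_save.pkl" matches the index file used in main() above; "cran.all" and
# "qrels.text" are assumed names for the Cranfield collection and relevance
# judgments, so adjust them to your local copies.
if __name__ == '__main__':
    test("idx_save.pkl", "cran.all", "qrels.text")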