def setup_module():
    """Load the lyrics train/dev splits and build pruned bag-of-words
    features, numpy matrices, and torch Variables shared by this module's tests."""
    global x_tr, y_tr, x_dv, y_dv, counts_tr, x_dv_pruned, x_tr_pruned
    global labels
    global vocab
    global X_tr, X_tr_var, X_dv_var, Y_tr, Y_dv, Y_tr_var, Y_dv_var, Y_test_var

    # --- training split ---
    y_tr, x_tr = preproc.read_data('../lyrics-train.csv',
                                   preprocessor=preproc.bag_of_words)
    labels = set(y_tr)
    counts_tr = preproc.aggregate_counts(x_tr)
    x_tr_pruned, vocab = preproc.prune_vocabulary(counts_tr, x_tr, 10)
    X_tr = preproc.make_numpy(x_tr_pruned, vocab)

    # integer-encode labels by their position in the sorted label list
    ordered_labels = sorted(set(y_tr))
    Y_tr = np.array([ordered_labels.index(lab) for lab in y_tr])
    X_tr_var = Variable(torch.from_numpy(X_tr.astype(np.float32)))
    Y_tr_var = Variable(torch.from_numpy(Y_tr))

    # --- dev split, pruned against the *training* counts ---
    y_dv, x_dv = preproc.read_data('../lyrics-dev.csv',
                                   preprocessor=preproc.bag_of_words)
    # remove this, so people can run earlier tests
    x_dv_pruned, _ = preproc.prune_vocabulary(counts_tr, x_dv, 10)
    dev_matrix = preproc.make_numpy(x_dv_pruned, vocab)
    Y_dv = np.array([ordered_labels.index(lab) for lab in y_dv])
    X_dv_var = Variable(torch.from_numpy(dev_matrix.astype(np.float32)))
    Y_dv_var = Variable(torch.from_numpy(Y_dv))
def setup_module():
    """Read the train and dev lyrics splits and aggregate per-split token counts."""
    global x_tr, y_tr, x_dv, y_dv, counts_tr, counts_dv, counts_bl, x_dv_pruned, x_tr_pruned
    global vocab

    # training split: labels, bag-of-words instances, aggregate counts
    y_tr, x_tr = preproc.read_data('../lyrics-train.csv',
                                   preprocessor=preproc.bag_of_words)
    counts_tr = preproc.aggregate_counts(x_tr)

    # dev split, processed identically
    y_dv, x_dv = preproc.read_data('../lyrics-dev.csv',
                                   preprocessor=preproc.bag_of_words)
    counts_dv = preproc.aggregate_counts(x_dv)
def setup_module():
    """Prepare lyrics data: read both splits, prune the vocabulary, and
    build the numpy matrices and torch Variables the tests rely on."""
    global x_tr, y_tr, x_dv, y_dv, counts_tr, x_dv_pruned, x_tr_pruned
    global labels
    global vocab
    global X_tr, X_tr_var, X_dv_var, Y_tr, Y_dv, Y_tr_var, Y_dv_var

    y_tr, x_tr = preproc.read_data('lyrics-train.csv',
                                   preprocessor=preproc.bag_of_words)
    labels = set(y_tr)
    counts_tr = preproc.aggregate_counts(x_tr)
    y_dv, x_dv = preproc.read_data('lyrics-dev.csv',
                                   preprocessor=preproc.bag_of_words)

    # both splits are pruned against the *training* counts
    x_tr_pruned, vocab = preproc.prune_vocabulary(counts_tr, x_tr, 10)
    ## remove this, so people can run earlier tests
    x_dv_pruned, _ = preproc.prune_vocabulary(counts_tr, x_dv, 10)

    X_tr = preproc.make_numpy(x_tr_pruned, vocab)
    X_dv = preproc.make_numpy(x_dv_pruned, vocab)

    # integer-encode labels via their position in sorted order
    class_names = sorted(set(y_tr))
    Y_tr = np.array([class_names.index(tag) for tag in y_tr])
    Y_dv = np.array([class_names.index(tag) for tag in y_dv])

    X_tr_var = Variable(torch.from_numpy(X_tr.astype(np.float32)))
    X_dv_var = Variable(torch.from_numpy(X_dv.astype(np.float32)))
    Y_tr_var = Variable(torch.from_numpy(Y_tr))
    Y_dv_var = Variable(torch.from_numpy(Y_dv))
def setup_module():
    """Load train/dev lyrics data and compute aggregate counts for each split."""
    global x_tr, y_tr, x_dv, y_dv, counts_tr, counts_dv, counts_bl, x_dv_pruned, x_tr_pruned
    global vocab

    # train split, then its token counts
    y_tr, x_tr = preproc.read_data('lyrics-train.csv',
                                   preprocessor=preproc.bag_of_words)
    counts_tr = preproc.aggregate_counts(x_tr)

    # dev split, then its token counts
    y_dv, x_dv = preproc.read_data('lyrics-dev.csv',
                                   preprocessor=preproc.bag_of_words)
    counts_dv = preproc.aggregate_counts(x_dv)
def setup_module():
    """Read the train/dev lyrics splits, compute training counts, and prune
    both splits to the training vocabulary."""
    global x_tr, y_tr, x_dv, y_dv, counts_tr, x_dv_pruned, x_tr_pruned, x_bl_pruned
    global labels
    global vocab

    y_tr, x_tr = preproc.read_data('lyrics-train.csv',
                                   preprocessor=preproc.bag_of_words)
    y_dv, x_dv = preproc.read_data('lyrics-dev.csv',
                                   preprocessor=preproc.bag_of_words)

    labels = set(y_tr)
    counts_tr = preproc.aggregate_counts(x_tr)

    # the dev split is pruned with the *training* counts so vocabularies match
    x_tr_pruned, vocab = preproc.prune_vocabulary(counts_tr, x_tr, 10)
    x_dv_pruned, _ = preproc.prune_vocabulary(counts_tr, x_dv, 10)
def setup_module():
    """Build the pruned training features and the sorted label list."""
    global vocab, label_set, x_tr_pruned, X_tr

    y_tr, x_tr = preproc.read_data('lyrics-train.csv',
                                   preprocessor=preproc.bag_of_words)
    labels = set(y_tr)
    label_set = sorted(labels)  # canonical ordering of the genre labels
    counts_tr = preproc.aggregate_counts(x_tr)
    x_tr_pruned, vocab = preproc.prune_vocabulary(counts_tr, x_tr, 10)
    X_tr = preproc.make_numpy(x_tr_pruned, vocab)
def setup_module():
    """Read the training lyrics and expose the pruned features, design
    matrix, and label ordering as module globals."""
    global vocab, label_set, x_tr_pruned, X_tr

    y_tr, x_tr = preproc.read_data('lyrics-train.csv',
                                   preprocessor=preproc.bag_of_words)
    counts_tr = preproc.aggregate_counts(x_tr)
    x_tr_pruned, vocab = preproc.prune_vocabulary(counts_tr, x_tr, 10)
    X_tr = preproc.make_numpy(x_tr_pruned, vocab)

    labels = set(y_tr)          # distinct genres seen in training
    label_set = sorted(labels)  # canonical label order for indexing
# Preprocessing script: read the reddit splits, build a frequency-pruned
# vocabulary from the training data, and filter every split down to it.
#
# Fixes vs. the original: Python-2-only constructs (`dict.iteritems`,
# `print` statements, the builtin `reload`) fail on Python 3 — replaced
# with `.items()`, `print()`, and `importlib.reload`. Vocabulary
# membership now uses a set (O(1)) instead of scanning a list for every
# token of every document.
from importlib import reload

from gtnlplib import preproc
reload(preproc)  # pick up in-progress edits to preproc without restarting

# this will not work until you implement it
y_tr, x_tr = preproc.read_data(
    'reddit-train.csv',                        # filename
    'subreddit',                               # label field
    preprocessor=preproc.tokenize_and_downcase)  # your preprocessor
y_dv, x_dv = preproc.read_data(
    'reddit-dev.csv',                          # filename
    'subreddit',                               # label field
    preprocessor=preproc.tokenize_and_downcase)  # your preprocessor
y_te, x_te = preproc.read_data(
    'reddit-test.csv',                         # filename
    'subreddit',                               # label field
    preprocessor=preproc.tokenize_and_downcase)  # your preprocessor

corpus_counts = preproc.get_corpus_counts(x_tr)
# keep words seen more than 10 times in training; vocab stays a list for
# downstream compatibility, but membership tests go through a set
vocab = [word for word, count in corpus_counts.items() if count > 10]
vocab_set = set(vocab)
print(len(vocab))
print(len(x_tr[0]))

# restrict every split's bag-of-words dicts to the pruned vocabulary
x_tr = [{key: val for key, val in x_i.items() if key in vocab_set}
        for x_i in x_tr]
x_dv = [{key: val for key, val in x_i.items() if key in vocab_set}
        for x_i in x_dv]
x_te = [{key: val for key, val in x_i.items() if key in vocab_set}
        for x_i in x_te]