예제 #1
0
def setup_module():
    """Load the train/dev lyric splits and aggregate their token counts.

    Populates module-level globals consumed by the tests in this file.
    """
    global x_tr, y_tr, x_dv, y_dv, counts_tr, counts_dv, counts_bl, x_dv_pruned, x_tr_pruned
    global vocab
    # Each read yields (labels, bag-of-words feature dicts); count aggregation
    # is done immediately after each split is loaded.
    y_dv, x_dv = preproc.read_data('../lyrics-dev.csv', preprocessor=preproc.bag_of_words)
    counts_dv = preproc.aggregate_counts(x_dv)
    y_tr, x_tr = preproc.read_data('../lyrics-train.csv', preprocessor=preproc.bag_of_words)
    counts_tr = preproc.aggregate_counts(x_tr)
예제 #2
0
def setup_module():
    """Read the training and dev lyric splits and their aggregate counts.

    Populates module-level globals consumed by the tests in this file.
    """
    global x_tr, y_tr, x_dv, y_dv, counts_tr, counts_dv, counts_bl, x_dv_pruned, x_tr_pruned
    global vocab
    # Train split: (labels, bag-of-words instances) plus corpus-wide counts.
    y_tr, x_tr = preproc.read_data('lyrics-train.csv', preprocessor=preproc.bag_of_words)
    counts_tr = preproc.aggregate_counts(x_tr)
    # Dev split, processed the same way.
    y_dv, x_dv = preproc.read_data('lyrics-dev.csv', preprocessor=preproc.bag_of_words)
    counts_dv = preproc.aggregate_counts(x_dv)
예제 #3
0
def setup_module():
    """Build pruned train/dev data, numpy matrices, and torch Variables.

    Populates module-level globals consumed by the tests in this file.
    """
    global x_tr, y_tr, x_dv, y_dv, counts_tr, x_dv_pruned, x_tr_pruned
    global labels
    global vocab
    global X_tr, X_tr_var, X_dv_var, Y_tr, Y_dv, Y_tr_var, Y_dv_var, Y_test_var

    # Training split: prune to words appearing >= 10 times, then densify.
    y_tr, x_tr = preproc.read_data('../lyrics-train.csv', preprocessor=preproc.bag_of_words)
    labels = set(y_tr)
    counts_tr = preproc.aggregate_counts(x_tr)
    x_tr_pruned, vocab = preproc.prune_vocabulary(counts_tr, x_tr, 10)
    X_tr = preproc.make_numpy(x_tr_pruned, vocab)

    # Integer label vectors: index of each label in the sorted label set.
    label_set = sorted(set(y_tr))
    label_index = {lab: i for i, lab in enumerate(label_set)}
    Y_tr = np.array([label_index[lab] for lab in y_tr])

    X_tr_var = Variable(torch.from_numpy(X_tr.astype(np.float32)))
    Y_tr_var = Variable(torch.from_numpy(Y_tr))

    # Dev split is pruned against the *training* counts so the vocab matches.
    y_dv, x_dv = preproc.read_data('../lyrics-dev.csv', preprocessor=preproc.bag_of_words)
    x_dv_pruned, _ = preproc.prune_vocabulary(counts_tr, x_dv, 10)
    X_dv = preproc.make_numpy(x_dv_pruned, vocab)
    Y_dv = np.array([label_index[lab] for lab in y_dv])

    X_dv_var = Variable(torch.from_numpy(X_dv.astype(np.float32)))
    Y_dv_var = Variable(torch.from_numpy(Y_dv))
예제 #4
0
def setup_module():
    """Prepare pruned train/dev splits, numpy matrices, and torch Variables.

    Populates module-level globals consumed by the tests in this file.
    """
    global x_tr, y_tr, x_dv, y_dv, counts_tr, x_dv_pruned, x_tr_pruned
    global labels
    global vocab
    global X_tr, X_tr_var, X_dv_var, Y_tr, Y_dv, Y_tr_var, Y_dv_var

    # Load both splits up front; each read yields (labels, bag-of-words dicts).
    y_tr, x_tr = preproc.read_data('lyrics-train.csv', preprocessor=preproc.bag_of_words)
    y_dv, x_dv = preproc.read_data('lyrics-dev.csv', preprocessor=preproc.bag_of_words)
    labels = set(y_tr)

    # Prune both splits against the training-set counts (threshold 10).
    counts_tr = preproc.aggregate_counts(x_tr)
    x_tr_pruned, vocab = preproc.prune_vocabulary(counts_tr, x_tr, 10)
    x_dv_pruned, _ = preproc.prune_vocabulary(counts_tr, x_dv, 10)

    # Dense matrices plus integer label vectors (index into sorted label set).
    X_tr = preproc.make_numpy(x_tr_pruned, vocab)
    X_dv = preproc.make_numpy(x_dv_pruned, vocab)
    label_index = {lab: i for i, lab in enumerate(sorted(set(y_tr)))}
    Y_tr = np.array([label_index[lab] for lab in y_tr])
    Y_dv = np.array([label_index[lab] for lab in y_dv])

    # Torch Variables for the neural-net tests.
    X_tr_var = Variable(torch.from_numpy(X_tr.astype(np.float32)))
    X_dv_var = Variable(torch.from_numpy(X_dv.astype(np.float32)))
    Y_tr_var = Variable(torch.from_numpy(Y_tr))
    Y_dv_var = Variable(torch.from_numpy(Y_dv))
예제 #5
0
def estimate_nb(x, y, smoothing):
    """Estimate a naive Bayes model.

    :param x: list of dictionaries of base feature counts
    :param y: list of labels
    :param smoothing: additive (Laplace) smoothing constant
    :returns: a defaultdict of features and weights. features are tuples (label,base_feature).
    :rtype: defaultdict

    Hint: See clf_base.predict() for the exact return type information.
    """
    # Re-aggregate counts over the instances to rebuild the vocabulary with
    # the same count->=10 threshold used when the data was pruned.
    # NOTE(review): the threshold 10 is duplicated here rather than shared
    # with prune_vocabulary — confirm they stay in sync.
    counts_tr = preproc.aggregate_counts([Counter(x_i) for x_i in x])
    vocab = [w for w, c in counts_tr.items() if c >= 10]
    len_v = len(vocab)

    # Per-label instance counts for the prior. Counter works for plain lists
    # as well as numpy arrays (the original `y[label == y]` indexing broke on
    # lists, although the docstring declares y to be a list).
    label_totals = Counter(y)
    n_instances = len(y)

    weights = defaultdict(float)
    for label in set(y):
        allcounts_label = get_corpus_counts(x, y, label)
        # Denominator is invariant over words — hoist it out of the loop
        # (the original recomputed the sum for every vocabulary word).
        denom = sum(allcounts_label.values()) + smoothing * len_v
        for word in vocab:
            # Smoothed log-likelihood of `word` given `label`.
            weights[(label, word)] = np.log((allcounts_label[word] + smoothing) / denom)
        # Log-prior: fraction of training instances carrying this label.
        weights[(label, OFFSET)] = np.log(label_totals[label] / n_instances)

    return weights
예제 #6
0
def setup_module():
    """Load training data, prune its vocabulary, and build the feature matrix.

    Populates module-level globals consumed by the tests in this file.
    """
    global vocab, label_set, x_tr_pruned, X_tr

    y_tr, x_tr = preproc.read_data('lyrics-train.csv', preprocessor=preproc.bag_of_words)
    labels = set(y_tr)  # NOTE(review): local only — not declared global; confirm intent

    # Prune to words seen >= 10 times, then densify against that vocabulary.
    counts_tr = preproc.aggregate_counts(x_tr)
    x_tr_pruned, vocab = preproc.prune_vocabulary(counts_tr, x_tr, 10)
    X_tr = preproc.make_numpy(x_tr_pruned, vocab)
    label_set = sorted(set(y_tr))
def setup_module():
    """Set up pruned training instances and their numpy representation.

    Populates module-level globals consumed by the tests in this file.
    """
    global vocab, label_set, x_tr_pruned, X_tr

    # read_data yields (labels, bag-of-words feature dicts).
    y_tr, x_tr = preproc.read_data('lyrics-train.csv',
                                   preprocessor=preproc.bag_of_words)
    labels = set(y_tr)  # NOTE(review): local only — not declared global; confirm intent

    # Count aggregation feeds straight into pruning (threshold 10).
    x_tr_pruned, vocab = preproc.prune_vocabulary(
        preproc.aggregate_counts(x_tr), x_tr, 10)
    label_set = sorted(set(y_tr))
    X_tr = preproc.make_numpy(x_tr_pruned, vocab)
예제 #8
0
def setup_module():
    """Read both lyric splits and prune each against training-set counts.

    Populates module-level globals consumed by the tests in this file.
    """
    global x_tr, y_tr, x_dv, y_dv, counts_tr, x_dv_pruned, x_tr_pruned, x_bl_pruned
    global labels
    global vocab

    # Load both splits; each read yields (labels, bag-of-words feature dicts).
    y_tr, x_tr = preproc.read_data('lyrics-train.csv', preprocessor=preproc.bag_of_words)
    y_dv, x_dv = preproc.read_data('lyrics-dev.csv', preprocessor=preproc.bag_of_words)

    labels = set(y_tr)
    counts_tr = preproc.aggregate_counts(x_tr)

    # Both splits share the training counts and the count->=10 threshold.
    x_tr_pruned, vocab = preproc.prune_vocabulary(counts_tr, x_tr, 10)
    x_dv_pruned, _ = preproc.prune_vocabulary(counts_tr, x_dv, 10)
예제 #9
0
def setup_module():
    """Load train/dev lyrics and prune both to the training vocabulary.

    Populates module-level globals consumed by the tests in this file.
    """
    global x_tr, y_tr, x_dv, y_dv, counts_tr, x_dv_pruned, x_tr_pruned, x_bl_pruned
    global labels
    global vocab

    # Training split drives the vocabulary: aggregate, then prune at 10.
    y_tr, x_tr = preproc.read_data('lyrics-train.csv',
                                   preprocessor=preproc.bag_of_words)
    labels = set(y_tr)
    counts_tr = preproc.aggregate_counts(x_tr)
    x_tr_pruned, vocab = preproc.prune_vocabulary(counts_tr, x_tr, 10)

    # Dev split is pruned with the same training counts, discarding its vocab.
    y_dv, x_dv = preproc.read_data('lyrics-dev.csv',
                                   preprocessor=preproc.bag_of_words)
    x_dv_pruned, _ = preproc.prune_vocabulary(counts_tr, x_dv, 10)