Example #1
def objective_and_grad(par_data):
    # unroll_params, init_dtrnn_grads, roll_params, and the prop module
    # (forward_prop / backprop) are project-local helpers assumed in scope
    params, d, len_voc, rel_list = par_data[0]
    data = par_data[1]
    params = unroll_params(params, d, len_voc, rel_list)
    grads = init_dtrnn_grads(rel_list, d, len_voc)

    (rel_dict, Wv, b, L) = params

    error_sum = 0.0
    tree_size = 0

    # compute error and gradient for each tree in minibatch
    # also keep track of total number of nodes in minibatch
    for tree in data:

        nodes = tree.get_nodes()
        # look up each node's word-embedding column from L
        for node in nodes:
            node.vec = L[:, node.ind].reshape((d, 1))

        tree.ans_vec = L[:, tree.ans_ind].reshape((d, 1))

        prop.forward_prop(params, tree, d)
        error_sum += tree.error()
        tree_size += len(nodes)

        prop.backprop(params[:-1], tree, d, len_voc, grads)

    # flatten the accumulated gradients into a single vector aligned
    # with the rolled parameter vector
    grad = roll_params(grads, rel_list)
    return (error_sum, grad, tree_size)
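The tuple-in, tuple-out shape of this function hints at how a caller consumes it. Below is a minimal, hypothetical driver (train_minibatch, params_flat, and lr are illustrative names, not part of the examples on this page) that takes one plain gradient-descent step, normalizing the summed error and gradient by the minibatch's node count.

def train_minibatch(params_flat, d, len_voc, rel_list, minibatch, lr=0.05):
    # pack arguments the way objective_and_grad expects: par_data[0] holds
    # the rolled params plus metadata, par_data[1] holds the list of trees
    error_sum, grad, tree_size = objective_and_grad(
        ((params_flat, d, len_voc, rel_list), minibatch))
    # error and gradient are sums over nodes, so normalize by tree_size
    params_flat = params_flat - lr * grad / tree_size
    return params_flat, error_sum / tree_size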
Example #2
def objective_and_grad(all_params, trees_batch):
    params_train, d, n_classes, len_voc, rel_list = all_params

    # returns list of initialized zero gradients which backprop modifies
    # rel_Wr, Wv, Wc, b, b_c, We
    grads = init_dtrnn_grads(rel_list, d, n_classes, len_voc)
    rel_Wr_dict, Wv, Wc, b, b_c, We = params_train

    error_sum = 0.0
    tree_size = 0

    # compute error and gradient for each tree in minibatch
    # also keep track of total number of nodes in minibatch
    for tree in trees_batch:
        nodes = tree.get_word_nodes()
        for node in nodes:
            node.vec = We[:, node.ind].reshape((d, 1))

        prop.forward_prop(params_train, tree, d, n_classes)
        error_sum += tree.error()
        tree_size += len(nodes)

        prop.backprop(params_train[:-1], tree, d, n_classes, len_voc, grads)

    return error_sum, grads, tree_size
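Unlike Example #1, this variant skips the roll/unroll step and returns the raw grads structure. init_dtrnn_grads itself is project code that is not shown; judging from the `rel_Wr, Wv, Wc, b, b_c, We` comment and how the parameters are unpacked above, it plausibly returns zero arrays shaped like the trainable parameters, which backprop then accumulates into in place. A sketch of that contract (the shapes of Wv and the per-relation matrices are assumptions, not taken from the project):

import numpy as np

def init_dtrnn_grads_sketch(rel_list, d, n_classes, len_voc):
    # one d x d gradient matrix per dependency relation, then zeros shaped
    # like Wv, Wc, b, b_c, We; We is d x len_voc since We[:, ind] is a
    # length-d embedding column in the code above
    return [
        {rel: np.zeros((d, d)) for rel in rel_list},  # rel_Wr
        np.zeros((d, d)),           # Wv (assumed square)
        np.zeros((n_classes, d)),   # Wc
        np.zeros((d, 1)),           # b
        np.zeros((n_classes, 1)),   # b_c
        np.zeros((d, len_voc)),     # We
    ]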
Example #3
def objective_and_grad(par_data):
    params, d, c, len_voc, rel_list = par_data[0]
    data = par_data[1]

    # returns list of initialized zero gradients which backprop modifies
    grads = init_dtrnn_grads(rel_list, d, c, len_voc)
    (rel_dict, Wv, Wc, b, b_c, L) = params

    error_sum = 0.0
    tree_size = 0

    # compute error and gradient for each tree in minibatch
    # also keep track of total number of nodes in minibatch
    for tree in data:

        nodes = tree.get_nodes()
        for node in nodes:
            node.vec = L[:, node.ind].reshape((d, 1))

        prop.forward_prop(params, tree, d, c)
        error_sum += tree.error()
        tree_size += len(nodes)

        prop.backprop(params[:-1], tree, d, c, len_voc, grads)

    return (error_sum, grads, tree_size)
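Packing every argument into the single par_data tuple looks awkward for direct calls, but it is exactly the calling convention multiprocessing.Pool.map requires, so minibatches can be scored in parallel. A hedged sketch (batch_objective and the worker count are illustrative names, not from these examples):

from multiprocessing import Pool

def batch_objective(params, d, c, len_voc, rel_list, minibatches, workers=4):
    # one par_data tuple per minibatch; objective_and_grad is a module-level
    # function, so it pickles cleanly for the worker processes
    args = [((params, d, c, len_voc, rel_list), mb) for mb in minibatches]
    with Pool(workers) as pool:
        results = pool.map(objective_and_grad, args)
    total_error = sum(err for err, _, _ in results)
    total_nodes = sum(size for _, _, size in results)
    return total_error, total_nodes, results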
Example #4
def evaluate(trees_test, rel_Wr_dict, Wv, Wc, b, b_c, We, c, sents_test, aspect_terms_true, correctness_file,
             mixed=False):
    d = We.shape[0]
    aspect_words = set()
    assert len(trees_test) == len(sents_test)
    correctness = list()
    hit_cnt, aspect_true_cnt, aspect_pred_cnt = 0, 0, 0
    for ind, tree in enumerate(trees_test):
        nodes = tree.get_word_nodes()
        for node in nodes:
            # skip nodes without a vocabulary index
            if node.ind > -1:
                node.vec = We[:, node.ind].reshape((d, 1))

        prop.forward_prop([rel_Wr_dict, Wv, Wc, b, b_c, We], tree, d, c, labels=False)
        y_pred = list()
        for n in nodes[1:]:
            if n.is_word:
                yi_pred = n.predict_label.flatten().argmax()
                y_pred.append(yi_pred)
        cur_aspect_terms = utils.aspect_terms_from_labeled(tree, y_pred)
        for t in cur_aspect_terms:
            aspect_words.add(t)

        aspect_pred_cnt += len(cur_aspect_terms)
        cur_aspects_true = sents_test[ind].get('terms', list())
        aspect_true_cnt += len(cur_aspects_true)
        new_hit_cnt = 0
        for t in cur_aspects_true:
            if t['term'] in cur_aspect_terms:
                new_hit_cnt += 1
        hit_cnt += new_hit_cnt

        if new_hit_cnt == len(cur_aspects_true) and new_hit_cnt == len(cur_aspect_terms):
            correctness.append(1)
        else:
            correctness.append(0)
        # if len(cur_aspect_terms) != len(cur_aspects_true) or not hit:
        #     tree.disp()
        #     print(y_pred)
        #     print(cur_aspect_terms)
        #     print(sents_test[ind])
        #     print()

    # p, r, f1 = utils.set_evaluate(aspect_terms_true, aspect_words)
    # print(p, r, f1)

    # precision/recall over exact aspect-term matches (guard empty counts)
    p1 = hit_cnt / aspect_pred_cnt if aspect_pred_cnt else 0.0
    r1 = hit_cnt / aspect_true_cnt if aspect_true_cnt else 0.0
    print(p1, r1, 2 * p1 * r1 / (p1 + r1) if p1 + r1 else 0.0)
    # for w in aspect_terms_true:
    #     if w not in aspect_words:
    #         print(w)
    # write per-sentence correctness flags to the path passed in by the
    # caller (the correctness_file parameter was previously unused and the
    # path was hardcoded)
    with open(correctness_file, 'w', encoding='utf-8') as fout:
        fout.write('\n'.join(str(i) for i in correctness))
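utils.aspect_terms_from_labeled is project code that is not shown; from the way evaluate feeds it a tree plus one predicted label per word and compares the results against gold term strings, it presumably joins runs of aspect-labeled words into term strings. A stand-in sketch under that assumption (label 0 meaning "not an aspect word" is also assumed, and the function takes the raw token list rather than the tree):

def aspect_terms_from_labels_sketch(words, y_pred):
    # join consecutive aspect-labeled tokens into one term string
    terms, current = [], []
    for word, label in zip(words, y_pred):
        if label != 0:
            current.append(word)
        elif current:
            terms.append(' '.join(current))
            current = []
    if current:
        terms.append(' '.join(current))
    return terms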
Example #5
def objective_and_grad(par_data):
    # two-relation variant: separate relation dicts, composition matrices
    # (Wv_1, Wv_2) and biases (b_1, b_2) for the two dependency inventories
    params, d, c, len_voc, rel_list1, rel_list2 = par_data[0]
    data = par_data[1]
    params = unroll_params(params, d, c, len_voc, rel_list1, rel_list2)

    # returns list of zero gradients which backprop modifies
    grads = init_dtrnn_grads(rel_list1, rel_list2, d, c, len_voc)

    (rel_dict1, rel_dict2, Wv_1, Wv_2, Wc, b_1, b_2, b_c, L) = params

    error_sum = 0.0
    tree_size = 0

    # compute error and gradient for each tree in minibatch
    # also keep track of total number of nodes in minibatch
    for tree in data:

        nodes = tree.get_nodes()
        for node in nodes:
            node.vec = L[:, node.ind].reshape((d, 1))

        prop.forward_prop(params, tree, d, c)
        error_sum += tree.error()
        tree_size += len(nodes)

        prop.backprop(params[:-1], tree, d, c, len_voc, grads)

    grad = roll_params(grads, rel_list1, rel_list2)
    return (error_sum, grad, tree_size)
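Because this variant both unrolls the incoming parameter vector and rolls the gradients back into one, it is easy to sanity-check the analytic gradient with central finite differences over a few coordinates. The sketch below is hypothetical (check_gradient and meta are illustrative names; it assumes params_flat and the returned grad are index-aligned flat numpy vectors):

import numpy as np

def check_gradient(params_flat, meta, data, n_checks=5, eps=1e-6):
    # meta = (d, c, len_voc, rel_list1, rel_list2)
    _, grad, _ = objective_and_grad(((params_flat,) + meta, data))
    rng = np.random.default_rng(0)
    for i in rng.choice(len(params_flat), size=n_checks, replace=False):
        plus, minus = params_flat.copy(), params_flat.copy()
        plus[i] += eps
        minus[i] -= eps
        err_plus, _, _ = objective_and_grad(((plus,) + meta, data))
        err_minus, _, _ = objective_and_grad(((minus,) + meta, data))
        # the central difference should match the analytic coordinate
        print(i, grad[i], (err_plus - err_minus) / (2 * eps))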