Code example #1
File: qanta.py | Project: chiraggiri/NLQA
def objective_and_grad(par_data):

    # par_data packs the shared state and the minibatch into one argument
    # so the function can be mapped over worker processes
    params, d, len_voc, rel_list = par_data[0]
    data = par_data[1]
    params = unroll_params(params, d, len_voc, rel_list)
    grads = init_dtrnn_grads(rel_list, d, len_voc)

    (rel_dict, Wv, b, L) = params

    error_sum = 0.0
    tree_size = 0

    # compute error and gradient for each tree in minibatch
    # also keep track of total number of nodes in minibatch
    for tree in data:

        nodes = tree.get_nodes()
        for node in nodes:
            node.vec = L[:, node.ind].reshape((d, 1))

    tree.ans_vec = L[:, tree.ans_ind].reshape((d, 1))

        prop.forward_prop(params, tree, d)
        error_sum += tree.error()
        tree_size += len(nodes)

        prop.backprop(params[:-1], tree, d, len_voc, grads)

    grad = roll_params(grads, rel_list)
    return (error_sum, grad, tree_size)
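The par_data packing above, with the shared rolled parameters and hyperparameters in element 0 and a minibatch of trees in element 1, suggests the function is meant to be mapped over worker processes. Below is a minimal driver sketch under that assumption; the import path, the chunking, and the plain gradient step are illustrative, not taken from the project.

# Minimal sketch of a parallel minibatch step, assuming qanta.py is on the
# path and exposes objective_and_grad as in the example above. The pool
# layout (one chunk per worker) and the update rule are assumptions.
from multiprocessing import Pool

from qanta import objective_and_grad  # assumed import


def minibatch_step(rolled_params, minibatches, d, len_voc, rel_list, lr=0.05):
    # One par_data tuple per worker: shared state first, the tree chunk second.
    jobs = [((rolled_params, d, len_voc, rel_list), batch)
            for batch in minibatches]
    with Pool(processes=len(jobs)) as pool:
        results = pool.map(objective_and_grad, jobs)

    # Sum errors and rolled gradients over all chunks, normalize by node count.
    total_err = sum(err for err, _, _ in results)
    total_nodes = max(sum(n for _, _, n in results), 1)
    grad = sum(g for _, g, _ in results) / total_nodes

    # Plain gradient step on the rolled vector; a real trainer might use
    # AdaGrad or L-BFGS instead.
    return rolled_params - lr * grad, total_err / total_nodes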
Code example #2
File: rncrfpretrain.py | Project: hldai/aspectex
def objective_and_grad(all_params, trees_batch):
    params_train, d, n_classes, len_voc, rel_list = all_params

    # returns list of initialized zero gradients which backprop modifies
    # rel_Wr, Wv, Wc, b, b_c, We
    grads = init_dtrnn_grads(rel_list, d, n_classes, len_voc)
    rel_Wr_dict, Wv, Wc, b, b_c, We = params_train

    error_sum = 0.0
    tree_size = 0

    # compute error and gradient for each tree in minibatch
    # also keep track of total number of nodes in minibatch
    for tree in trees_batch:
        nodes = tree.get_word_nodes()
        for node in nodes:
            node.vec = We[:, node.ind].reshape((d, 1))

        prop.forward_prop(params_train, tree, d, n_classes)
        error_sum += tree.error()
        tree_size += len(nodes)

        prop.backprop(params_train[:-1], tree, d, n_classes, len_voc, grads)

    return error_sum, grads, tree_size
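Unlike the first example, this variant takes the parameters and the tree batch as two plain arguments and returns the unrolled per-matrix gradients instead of a rolled vector. A minimal single-process update sketch under those assumptions follows; the dict layout of the relation gradients is inferred from rel_Wr_dict above, and the function name and learning rate are illustrative.

def sgd_step(params_train, grads, tree_size, lr=0.05):
    # Unpacking mirrors the (dict, arrays...) layout in the example above;
    # assuming the gradients follow the same structure as the parameters.
    rel_Wr_dict, Wv, Wc, b, b_c, We = params_train
    g_rel, g_Wv, g_Wc, g_b, g_bc, g_We = grads
    scale = lr / max(tree_size, 1)  # normalize by node count in the minibatch

    for rel, g in g_rel.items():    # one Wr matrix per dependency relation
        rel_Wr_dict[rel] -= scale * g
    for p, g in ((Wv, g_Wv), (Wc, g_Wc), (b, g_b), (b_c, g_bc), (We, g_We)):
        p -= scale * g              # in-place numpy update keeps aliases valid
    return params_train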
Code example #3
File: train_depnn.py | Project: happywwy/rncrf
def objective_and_grad(par_data):

    params, d, c, len_voc, rel_list = par_data[0]
    data = par_data[1]
    
    # returns list of initialized zero gradients which backprop modifies
    grads = init_dtrnn_grads(rel_list, d, c, len_voc)
    (rel_dict, Wv, Wc, b, b_c, L) = params

    error_sum = 0.0
    tree_size = 0

    # compute error and gradient for each tree in minibatch
    # also keep track of total number of nodes in minibatch
    for tree in data:

        nodes = tree.get_nodes()
        for node in nodes:
            node.vec = L[:, node.ind].reshape((d, 1))

        prop.forward_prop(params, tree, d, c)
        error_sum += tree.error()
        tree_size += len(nodes)

        prop.backprop(params[:-1], tree, d, c, len_voc, grads)

    return (error_sum, grads, tree_size)
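This variant packs its arguments like the first example but, like the previous one, returns the unrolled gradient list rather than a rolled vector, so results from parallel workers have to be reduced structurally rather than by vector addition. A hedged sketch of that reduction; the nested (dict, arrays...) layout is inferred from the unpacking above, and sum_grads is an illustrative name.

def sum_grads(grads_list):
    # Accumulate into the first worker's gradients (mutated in place).
    total = grads_list[0]
    t_rel, *t_rest = total
    for grads in grads_list[1:]:
        g_rel, *g_rest = grads
        for rel in t_rel:           # relation-keyed gradient matrices
            t_rel[rel] += g_rel[rel]
        for t, g in zip(t_rest, g_rest):
            t += g                  # += on numpy arrays updates in place
    return total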
Code example #4
File: train.py | Project: happywwy/Bidir_depnn_AE
def objective_and_grad(par_data):

    # Bidirectional variant: two relation lists and two weight sets, one per
    # traversal direction, plus a shared classifier (Wc, b_c)
    params, d, c, len_voc, rel_list1, rel_list2 = par_data[0]
    data = par_data[1]
    params = unroll_params(params, d, c, len_voc, rel_list1, rel_list2)

    # returns list of zero gradients which backprop modifies
    grads = init_dtrnn_grads(rel_list1, rel_list2, d, c, len_voc)

    (rel_dict1, rel_dict2, Wv_1, Wv_2, Wc, b_1, b_2, b_c, L) = params

    error_sum = 0.0
    tree_size = 0

    # compute error and gradient for each tree in minibatch
    # also keep track of total number of nodes in minibatch
    for tree in data:

        nodes = tree.get_nodes()
        for node in nodes:
            node.vec = L[:, node.ind].reshape((d, 1))

        prop.forward_prop(params, tree, d, c)
        error_sum += tree.error()
        tree_size += len(nodes)

        prop.backprop(params[:-1], tree, d, c, len_voc, grads)

    grad = roll_params(grads, rel_list1, rel_list2)
    return (error_sum, grad, tree_size)
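Because this variant keeps the rolled-vector contract of the first example (flat parameters in, flat gradient out via roll_params), a generic first-order optimizer can drive it directly. A sketch of a scipy hookup under that assumption; the import path, the wrapper name, and the per-node normalization are illustrative.

from scipy.optimize import minimize

from train import objective_and_grad  # assumed import; see the example above


def make_objective(data, d, c, len_voc, rel_list1, rel_list2):
    # Wrap the (error, rolled_grad, size) return into the (f, g) pair
    # that scipy expects when jac=True.
    def f(rolled_params):
        err, grad, size = objective_and_grad(
            ((rolled_params, d, c, len_voc, rel_list1, rel_list2), data))
        n = max(size, 1)
        return err / n, grad / n    # per-node normalization (illustrative)
    return f


# Usage, with all arguments assumed prepared elsewhere in the project:
# result = minimize(make_objective(trees, d, c, len_voc, rels1, rels2),
#                   x0=initial_rolled_params, jac=True, method="L-BFGS-B")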