Пример #1
0
    def first_neural_network(self, file, learning_rate, momentum, l2_penalty,
                             epoch):
        """Parse *file*, embed its AST nodes and train the first neural
        network to obtain a vector representation per node.

        Returns a list: [ls_nodes, dict_ast_to_Node, dict_sibling,
        w_l_code, w_r_code, b_code].
        """
        # Parse the source file into an AST and wrap every AST node in our
        # Node class, then attach position, sibling and leaf information.
        tree = file_parser(file)
        ls_nodes, dict_ast_to_Node = node_object_creator(tree)
        ls_nodes = node_position_assign(ls_nodes)
        ls_nodes, dict_sibling = node_sibling_assign(ls_nodes)
        ls_nodes = leaves_nodes_assign(ls_nodes, dict_ast_to_Node)

        # Give every node an initial vector embedding.
        embedding = Embedding(self.vector_size, ls_nodes, dict_ast_to_Node)
        ls_nodes = embedding.node_embedding()

        # Train the first neural network: per-node vectors plus the learned
        # left/right weight matrices and the bias.
        first_nn = First_neural_network(ls_nodes, dict_ast_to_Node,
                                        self.vector_size, learning_rate,
                                        momentum, l2_penalty, epoch)
        ls_nodes, w_l_code, w_r_code, b_code = first_nn.vector_representation()

        return [ls_nodes, dict_ast_to_Node, dict_sibling, w_l_code,
                w_r_code, b_code]
Пример #2
0
def set_up_dynamic_pooling_layer():
    """Run the pipeline (parse -> embed -> first NN -> coding ->
    convolution -> max pooling -> dynamic pooling) on the test file.

    Returns:
        tuple: (ls_nodes, hidden_input), where hidden_input is the
        three-way-pooled vector fed to the hidden layer.
    """
    # BUG FIX: the original used 'test\pruebas.py' — the backslash is a
    # literal character on POSIX (and '\p' is an invalid escape sequence).
    # A forward slash is accepted on every platform.
    tree = file_parser('test/pruebas.py')
    ls_nodes, dict_ast_to_Node = node_object_creator(tree)
    ls_nodes = node_position_assign(ls_nodes)
    ls_nodes, dict_sibling = node_sibling_assign(ls_nodes)
    # Initial 20-dimensional embedding for every node.
    embed = Embedding(20, ls_nodes, dict_ast_to_Node)
    ls_nodes = embed.node_embedding()[:]
    vector_representation = First_neural_network(ls_nodes, dict_ast_to_Node,
                                                 20, 0.1, 0.001)
    ls_nodes, w_l, w_r, b_code = vector_representation.vector_representation()
    # Random diagonal combination matrices used by the coding layer.
    w_comb1 = torch.diag(torch.randn(20, dtype=torch.float32)).requires_grad_()
    w_comb2 = torch.diag(torch.randn(20, dtype=torch.float32)).requires_grad_()
    coding_layer = Coding_layer(20, w_comb1, w_comb2)
    ls_nodes = coding_layer.coding_layer(ls_nodes, dict_ast_to_Node, w_l, w_r,
                                         b_code)
    # Convolution weights: 4 output features over 20-dimensional vectors.
    w_t = torch.randn(4, 20, requires_grad=True)
    w_r = torch.randn(4, 20, requires_grad=True)
    w_l = torch.randn(4, 20, requires_grad=True)
    b_conv = torch.randn(4, requires_grad=True)
    convolutional_layer = Convolutional_layer(20, w_t, w_r, w_l, b_conv,
                                              features_size=4)
    ls_nodes = convolutional_layer.convolutional_layer(ls_nodes,
                                                       dict_ast_to_Node)
    # Max pooling mutates the nodes in place; dynamic pooling then builds
    # the three-way pooled hidden-layer input.
    max_pooling_layer = Max_pooling_layer()
    max_pooling_layer.max_pooling(ls_nodes)
    dynamic_pooling = Dynamic_pooling_layer()
    hidden_input = dynamic_pooling.three_way_pooling(ls_nodes, dict_sibling)

    return ls_nodes, hidden_input
Пример #3
0
def set_up_matrix():
    """Parse and embed the test file, then return a MatrixGenerator for
    20-dimensional vectors with 10 features."""
    # BUG FIX: the original used 'test\pruebas.py' — the backslash is a
    # literal character on POSIX (and '\p' is an invalid escape sequence).
    tree = file_parser('test/pruebas.py')
    ls_nodes, dict_ast_to_Node = node_object_creator(tree)
    embed = Embedding(20, ls_nodes, dict_ast_to_Node)
    ls_nodes = embed.node_embedding()[:]
    matrices = MatrixGenerator(20, 10)
    return matrices
Пример #4
0
def set_up_one_max_pooling_layer():
    """Run the pipeline through the one-way (max) pooling layer on the
    generators test file.

    Returns:
        The pooled tensor produced by Pooling_layer.
    """
    path = os.path.join('test', 'generators')
    data = os.path.join(path, 'prueba.py')
    tree = file_parser(data)
    ls_nodes, dict_ast_to_Node = node_object_creator(tree)
    ls_nodes = node_position_assign(ls_nodes)
    ls_nodes, dict_sibling = node_sibling_assign(ls_nodes)
    # FIX: the original called leaves_nodes_assign twice in a row; the
    # second call was redundant duplication and has been removed.
    ls_nodes = leaves_nodes_assign(ls_nodes, dict_ast_to_Node)
    # Initial 20-dimensional embedding for every node.
    embed = Embedding(20, ls_nodes, dict_ast_to_Node)
    ls_nodes = embed.node_embedding()[:]
    vector_representation = First_neural_network(ls_nodes, dict_ast_to_Node,
                                                 20, 0.1, 0.001, 0, 5)
    ls_nodes, w_l, w_r, b_code = vector_representation.vector_representation()
    # Random diagonal combination matrices used by the coding layer.
    w_comb1 = torch.diag(torch.randn(20, dtype=torch.float32)).requires_grad_()
    w_comb2 = torch.diag(torch.randn(20, dtype=torch.float32)).requires_grad_()
    coding_layer = Coding_layer(20, w_comb1, w_comb2)
    ls_nodes = coding_layer.coding_layer(ls_nodes, dict_ast_to_Node, w_l, w_r,
                                         b_code)
    # Convolution weights: 4 output features over 20-dimensional vectors.
    w_t = torch.randn(4, 20, requires_grad=True)
    w_r = torch.randn(4, 20, requires_grad=True)
    w_l = torch.randn(4, 20, requires_grad=True)
    b_conv = torch.randn(4, requires_grad=True)
    convolutional_layer = Convolutional_layer(20, w_t, w_r, w_l, b_conv,
                                              features_size=4)
    ls_nodes = convolutional_layer.convolutional_layer(ls_nodes,
                                                       dict_ast_to_Node)
    pooling_layer = Pooling_layer()
    pooled_tensor = pooling_layer.pooling_layer(ls_nodes)

    return pooled_tensor
Пример #5
0
def set_up_vector_representation():
    """Parse and embed the test file, train the first neural network and
    return (ls_nodes, w_l, w_r, b_code)."""
    # BUG FIX: the original used 'test\pruebas.py' — the backslash is a
    # literal character on POSIX (and '\p' is an invalid escape sequence).
    tree = file_parser('test/pruebas.py')
    ls_nodes, dict_ast_to_Node = node_object_creator(tree)
    embed = Embedding(20, ls_nodes, dict_ast_to_Node)
    ls_nodes = embed.node_embedding()[:]
    vector_representation = First_neural_network(ls_nodes, dict_ast_to_Node,
                                                 20, 0.1, 0.001)
    ls_nodes, w_l, w_r, b_code = vector_representation.vector_representation()
    return ls_nodes, w_l, w_r, b_code
Пример #6
0
def set_up_matrix():
    """Parse and embed the generators test file, then return a
    MatrixGenerator for 20-dimensional vectors with 10 features."""
    source = os.path.join('test', 'generators', 'prueba.py')
    tree = file_parser(source)
    nodes, ast_map = node_object_creator(tree)
    # Give every node its initial 20-dimensional embedding.
    embedding = Embedding(20, nodes, ast_map)
    nodes = list(embedding.node_embedding())
    return MatrixGenerator(20, 10)
Пример #7
0
def set_up_vector_representation():
    """Compute per-node vector representations for the generators test
    file and return (ls_nodes, w_l, w_r, b_code)."""
    source = os.path.join('test', 'generators', 'prueba.py')
    tree = file_parser(source)
    nodes, ast_map = node_object_creator(tree)
    nodes = leaves_nodes_assign(nodes, ast_map)
    # Initial 20-dimensional embedding for every node.
    nodes = list(Embedding(20, nodes, ast_map).node_embedding())
    first_nn = First_neural_network(nodes, ast_map, 20, 0.1, 0.001, 0, 5)
    nodes, w_l, w_r, b_code = first_nn.vector_representation()
    return nodes, w_l, w_r, b_code
Пример #8
0
def set_up_coding_layer():
    """Run the pipeline through the coding layer on the test file and
    return (ls_nodes, w_comb1, w_comb2)."""
    # BUG FIX: the original used 'test\pruebas.py' — the backslash is a
    # literal character on POSIX (and '\p' is an invalid escape sequence).
    tree = file_parser('test/pruebas.py')
    ls_nodes, dict_ast_to_Node = node_object_creator(tree)
    embed = Embedding(20, ls_nodes, dict_ast_to_Node)
    ls_nodes = embed.node_embedding()[:]
    vector_representation = First_neural_network(ls_nodes, dict_ast_to_Node,
                                                 20, 0.1, 0.001)
    ls_nodes, w_l, w_r, b_code = vector_representation.vector_representation()
    # Random diagonal combination matrices used by the coding layer.
    w_comb1 = torch.diag(torch.randn(20, dtype=torch.float32)).requires_grad_()
    w_comb2 = torch.diag(torch.randn(20, dtype=torch.float32)).requires_grad_()
    coding_layer = Coding_layer(20, w_comb1, w_comb2)
    ls_nodes = coding_layer.coding_layer(ls_nodes, dict_ast_to_Node, w_l, w_r,
                                         b_code)
    return ls_nodes, w_comb1, w_comb2
Пример #9
0
def set_up_coding_layer():
    """Run the pipeline through the coding layer on the generators test
    file and return (ls_nodes, w_comb1, w_comb2)."""
    source = os.path.join('test', 'generators', 'prueba.py')
    tree = file_parser(source)
    nodes, ast_map = node_object_creator(tree)
    nodes = leaves_nodes_assign(nodes, ast_map)
    # Initial 20-dimensional embedding for every node.
    nodes = list(Embedding(20, nodes, ast_map).node_embedding())
    first_nn = First_neural_network(nodes, ast_map, 20, 0.1, 0.001, 0, 5)
    nodes, w_l, w_r, b_code = first_nn.vector_representation()
    # Random diagonal combination matrices for the coding layer.
    w_comb1 = torch.diag(torch.randn(20, dtype=torch.float32)).requires_grad_()
    w_comb2 = torch.diag(torch.randn(20, dtype=torch.float32)).requires_grad_()
    coder = Coding_layer(20, w_comb1, w_comb2)
    nodes = coder.coding_layer(nodes, ast_map, w_l, w_r, b_code)
    return nodes, w_comb1, w_comb2
Пример #10
0
def set_up_hidden_layer():
    """Run the complete pipeline (parse -> embed -> first NN -> coding ->
    convolution -> pooling -> hidden layer) on the generators test file.

    Returns:
        tuple: (output_hidden, w_hidden, b_hidden).
    """
    source = os.path.join('test', 'generators', 'prueba.py')
    tree = file_parser(source)
    nodes, ast_map = node_object_creator(tree)
    nodes = node_position_assign(nodes)
    nodes, siblings = node_sibling_assign(nodes)
    nodes = leaves_nodes_assign(nodes, ast_map)
    # Initial 20-dimensional embedding for every node.
    nodes = list(Embedding(20, nodes, ast_map).node_embedding())
    first_nn = First_neural_network(nodes, ast_map, 20, 0.1, 0.001, 0, 5)
    nodes, w_l, w_r, b_code = first_nn.vector_representation()
    # Random diagonal combination matrices for the coding layer.
    w_comb1 = torch.diag(torch.randn(20, dtype=torch.float32)).requires_grad_()
    w_comb2 = torch.diag(torch.randn(20, dtype=torch.float32)).requires_grad_()
    coder = Coding_layer(20, w_comb1, w_comb2)
    nodes = coder.coding_layer(nodes, ast_map, w_l, w_r, b_code)
    # Fresh random convolution weights: 4 features over 20 dimensions.
    w_t = torch.randn(4, 20, requires_grad=True)
    w_r = torch.randn(4, 20, requires_grad=True)
    w_l = torch.randn(4, 20, requires_grad=True)
    b_conv = torch.randn(4, requires_grad=True)
    conv = Convolutional_layer(20, w_t, w_r, w_l, b_conv, features_size=4)
    nodes = conv.convolutional_layer(nodes, ast_map)
    # Max pooling mutates the nodes in place; dynamic pooling produces the
    # three-way pooled input for the hidden layer.
    Max_pooling_layer().max_pooling(nodes)
    pooled = Dynamic_pooling_layer().three_way_pooling(nodes, siblings)
    w_hidden = torch.randn(3, requires_grad=True)
    b_hidden = torch.randn(1, requires_grad=True)
    hidden = Hidden_layer(w_hidden, b_hidden)
    output_hidden = hidden.hidden_layer(pooled)

    return output_hidden, w_hidden, b_hidden
Пример #11
0
def first_neural_network(training_dict,
                         vector_size=20,
                         learning_rate=0.1,
                         momentum=0.01,
                         l2_penalty=0,
                         epoch=45):
    """For every file in *training_dict*, parse it, embed its AST nodes
    and train the first neural network to obtain per-node vectors.

    Mutates *training_dict* in place and returns it: each file key maps to
    [ls_nodes, dict_ast_to_Node, dict_sibling, w_l_code, w_r_code, b_code].
    """
    total = len(training_dict)
    # IDIOM: enumerate(start=1) replaces the original hand-rolled counter
    # (i = 1 ... i += 1).
    for i, data in enumerate(training_dict, start=1):
        # Parse the file into an AST, wrap each node in our Node class and
        # attach position, sibling and leaf information.
        tree = file_parser(data)
        ls_nodes, dict_ast_to_Node = node_object_creator(tree)
        ls_nodes = node_position_assign(ls_nodes)
        ls_nodes, dict_sibling = node_sibling_assign(ls_nodes)
        ls_nodes = leaves_nodes_assign(ls_nodes, dict_ast_to_Node)

        # Give every node an initial vector embedding.
        embed = Embedding(vector_size, ls_nodes, dict_ast_to_Node)
        ls_nodes = embed.node_embedding()

        # Train the first neural network: per-node vectors plus the learned
        # left/right weight matrices and the bias.
        vector_representation = First_neural_network(ls_nodes,
                                                     dict_ast_to_Node,
                                                     vector_size,
                                                     learning_rate, momentum,
                                                     l2_penalty, epoch)
        ls_nodes, w_l_code, w_r_code, b_code = vector_representation.vector_representation(
        )

        # Only existing keys are overwritten, so mutating the dict while
        # iterating over it is safe here.
        training_dict[data] = [
            ls_nodes, dict_ast_to_Node, dict_sibling, w_l_code, w_r_code,
            b_code
        ]
        print(f"finished vector representation of file: {data} ({i}/{total})")
    return training_dict