Example #1
def test_nbr_neuron(list_test):
    # Train one network per candidate hidden-layer width in list_test and plot its training error curve.
    color_list=['r','g','b','k','m','c','y']
    color_list *= 3
    k=0
    for i in list_test :
        my_layer1 = Layer.Linear(6,i)
        my_layer2 = ActivationFunctions.Tanh()
        my_layer5 = Layer.Linear(i,i)
        my_layer6 = ActivationFunctions.Tanh()
        my_layer3 = Layer.Linear(i,1)
        my_layer4 = ActivationFunctions.Sigmoid()
        my_NN = Neural_network.NeuralNet([my_layer1, my_layer2, my_layer5, my_layer6, my_layer3, my_layer4])
        
        
        chi2_list, error_list = User.train(my_NN, data_train_input, data_train_target, num_epochs = num_epoch_max, optimizer = Optimizer.SGD(lr = my_lr), batch_size=my_batch_size)
        
        data_test_prediction = User.prediction(my_NN,data_test_input)
        error_final = Error_round.error_round(data_test_prediction, data_test_target)

        plt.plot(range(num_epoch_max), error_list, label= str(i), c=color_list[k])
        plt.plot([num_epoch_max],[error_final], marker='o', c=color_list[k])
        plt.xlabel('Epoch')
        plt.ylabel('Training round error')
        
        k+=1
    plt.legend(title='Neurons')
    plt.title('Optimisation of the number of neurons')
    plt.show()
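
# Hypothetical call (illustrative widths only); assumes the module-level globals used above
# (data_train_input, data_train_target, data_test_input, data_test_target,
#  num_epoch_max, my_lr, my_batch_size) are already defined.
test_nbr_neuron([2, 4, 8, 16])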
Example #2
def test_nbr_layer(list_test, n_neuron):
    # Train one network per candidate hidden-layer count in list_test (each hidden block
    # has n_neuron neurons) and plot its training error curve.
    color_list=['r','g','b','k','m','c','y']
    color_list *= 3
    k=0
    
    # NOTE: the input and output layers are created once and shared by every tested network,
    # so their trained weights carry over from one depth to the next.
    my_layerini1 = Layer.Linear(6,n_neuron)
    my_layerini2 = ActivationFunctions.Tanh()
    my_layerfini1 = Layer.Linear(n_neuron,1)
    my_layerfini2 = ActivationFunctions.Sigmoid()
        
    for i in list_test :
        layers_new = [my_layerini1, my_layerini2]
        for j in range(i) :
            layers_new += [Layer.Linear(n_neuron,n_neuron),ActivationFunctions.Tanh()]
        layers_new += [my_layerfini1, my_layerfini2]
        my_NN = Neural_network.NeuralNet(layers_new)
        
        
        chi2_list, error_list = User.train(my_NN, data_train_input, data_train_target, num_epochs = num_epoch_max,optimizer = Optimizer.SGD(lr = my_lr), batch_size=my_batch_size)
        data_test_prediction = User.prediction(my_NN,data_test_input)
        
        error_final = Error_round.error_round(data_test_prediction, data_test_target)
        
        plt.plot(range(num_epoch_max), error_list, label= str(i),c=color_list[k])
        plt.plot([num_epoch_max],[error_final], marker='o', c=color_list[k])
        plt.xlabel('Epoch')
        plt.ylabel('Training round error')
        
        k+=1
    plt.legend(title='Hidden layers')
    plt.title('Optimisation of the number of hidden layers')
    plt.show()
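
# Hypothetical call (illustrative values only), under the same global-variable assumptions
# as test_nbr_neuron above: compare 1 to 4 extra hidden blocks of 4 neurons each.
test_nbr_layer([1, 2, 3, 4], n_neuron=4)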
Example #3
 def __init__(self, args, vocab):
     # Embedding -> hidden -> softmax language model over a fixed-length word context.
     self.model = nn.Sequential()
     self.model.add_layer(nn.WordEmbedding(args['vocab_size'], args['embedding_dim'], init_wt=args['init_wt']))
     self.model.add_layer(nn.Linear(args['embedding_dim'] * args['context_len'], args['num_hid'], init_wt=args['init_wt']))
     self.model.add_layer(nn.Sigmoid())
     self.model.add_layer(nn.Linear(args['num_hid'], args['vocab_size'], init_wt=args['init_wt']))
     self.model.add_layer(nn.SoftMax())
     self.criterion = nn.CrossEntropy(args['vocab_size'])
     self.vocab = vocab
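
# Hypothetical configuration for the constructor above; the keys match the args
# lookups in the code, but the values are purely illustrative.
args = {'vocab_size': 250, 'embedding_dim': 16, 'context_len': 3,
        'num_hid': 128, 'init_wt': 0.01}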
Example #4
def test_XOR() :
    my_layer1 = Layer.Linear(2,3)
    my_layer2 = ActivationFunctions.Tanh()
    my_layer3 = Layer.Linear(3,1)
    my_layer4 = ActivationFunctions.Sigmoid()
    #my_layer3 = lib2.Arondi()
    my_NN = Neural_network.NeuralNet([my_layer1,my_layer2,my_layer3,my_layer4])
    
    input =np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
    target = np.array([[0], [1], [1], [0]])
    
    User.train(my_NN, input, target, batch_size = 1, num_epochs = 1000)
    # Be careful: size_training must equal the number of rows in our data
    
    input_predict = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
    print(User.prediction(my_NN,input_predict))
    ''' OK '''
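
    # Hypothetical lines one might append inside test_XOR() after training: round the
    # sigmoid outputs and compare them to the XOR truth table.
    prediction = np.round(User.prediction(my_NN, input_predict))
    print('XOR learned:', np.array_equal(prediction, target))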
Example #5
def test_LinearLayer() :
    input = np.array([[1, 1, 1, 1, 1, 1]])
    grad = np.array([[0.5,0.2,0.3,0.1]])
    lin = Layer.Linear(6,4)  # input and output sizes
    print('Y', lin.forward(input))
    print( 'grad',lin.backward(grad))
    print('grad_w',lin.grad_w)
    print('grad_b',lin.grad_b)
    ''' OK '''
Example #6
 def __init__(self, depth=8, lrmul=0.01):
     super(G_mapping, self).__init__()
     # Mapping network: pixel normalisation followed by `depth` Linear(512, 512) + LeakyReLU blocks.
     layers = [layer.PixelNorm()]
     for i in range(depth):
         layers += [
             layer.Linear(512, 512, lrmul=lrmul),
             nn.LeakyReLU(negative_slope=0.2)
         ]
     self.layers = nn.Sequential(*layers)
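
# Hypothetical usage sketch: push a random 512-dimensional latent through the mapping
# blocks directly (assumes torch is imported and the rest of the G_mapping class is
# defined elsewhere).
g_mapping = G_mapping()
w = g_mapping.layers(torch.randn(1, 512))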
Example #7
def test_NeuralNet() :
    my_layer1 = Layer.Linear(3,2)
    my_layer2 = ActivationFunctions.Tanh()
    my_NN = Neural_network.NeuralNet([my_layer1,my_layer2])
    
    input = np.array([[1,2,3],[4,5,6]])
    grad =  np.array([[0.5,0.2],[0.1,0.3]])
    
    
    print('forward', my_NN.forward(input))
    print('backward', my_NN.backward(grad))
    '''OK'''
Example #8
def test_train_prediction() :
    my_layer1 = Layer.Linear(3,2)
    my_layer2 = ActivationFunctions.Tanh()
    my_NN = Neural_network.NeuralNet([my_layer1,my_layer2])
    
    input = np.array([[1,2,3],[4,5,6]])
    target = np.array([[0.5,0.2],[0.1,0.3]])
    
    User.train(my_NN, input, target, batch_size = 1)
    # Be careful: size_training must equal the number of rows in our data
    
    input_predict = np.array([[1,1,4],[0.5,2,4]])
    print(User.prediction(my_NN,input_predict))
    ''' OK '''
Example #9
def train_simultaneousNN(
        inputs_train: Tensor,
        targets_train: Tensor,
        loss: Loss.Loss = Loss.MeanSquareError(),
        optimizer: OptimizerClass.Optimizer = OptimizerClass.SGD(),
        num_epochs: int = 5000,
        batch_size: int = 32) -> tuple:

    size_training = inputs_train.shape[0]
    Result_chi2 = [[], [], [], [], [], [], [], [], []]
    list_epoch = np.array(range(10, 50, 5)) / 100 * num_epochs
    '''initialisation of the 9 NNs'''  # TODO: check the seed() question
    list_net = []
    for i in range(9):
        layers = []
        layers.append(Layer.Linear(6, 4))
        layers.append(ActivationFunctions.Tanh())
        layers.append(Layer.Linear(4, 2))
        layers.append(ActivationFunctions.Tanh())
        layers.append(Layer.Linear(2, 1))
        layers.append(ActivationFunctions.Sigmoid())
        list_net.append(Neural_network.NeuralNet(layers))

    destroyed_NN = []
    nbr_batch = size_training // batch_size
    '''training of the 9 NNs'''
    for epoch in range(num_epochs):

        for k in range(9):
            if k not in destroyed_NN:
                Chi2_train = 0

                for i in range(0, size_training, batch_size):

                    # 1) feed forward
                    y_actual = list_net[k].forward(inputs_train[i:i +
                                                                batch_size])

                    # 2) compute the loss and the gradients
                    Chi2_train += loss.loss(targets_train[i:i + batch_size],
                                            y_actual)
                    grad_ini = loss.grad(targets_train[i:i + batch_size],
                                         y_actual)

                    # 3)feed backwards
                    grad_fini = list_net[k].backward(grad_ini)

                    # 4) update the net
                    optimizer.step(list_net[k], n_epoch=epoch)

                Chi2_train = Chi2_train / nbr_batch
                Result_chi2[k].append(Chi2_train)
        '''Removal of the least efficient NN'''
        if epoch in list_epoch:
            Comparaison = [[], []]
            for k in range(9):
                if k not in destroyed_NN:
                    ErrorSlope = np.polyfit(np.array(range(epoch - 49, epoch)),
                                            Result_chi2[k][-50:-1], 1)[0]
                    MixedError = Result_chi2[k][-1] * (1 -
                                                       np.arctan(ErrorSlope) /
                                                       (np.pi / 2))
                    Comparaison[0].append(k)
                    Comparaison[1].append(MixedError)

            k = Comparaison[0][Comparaison[1].index(max(Comparaison[1]))]
            destroyed_NN.append(k)

        if epoch % 100 == 0:
            print('epoch : ' + str(epoch) + "/" + str(num_epochs) + "\r",
                  end="")

    for k in range(9):
        if k not in destroyed_NN:
            my_NN = list_net[k]
    return my_NN, Result_chi2
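
# Hypothetical usage sketch (random data, purely illustrative): train the nine competing
# networks, progressively dropping the weakest, and keep the survivor together with the
# chi-squared histories of all nine.
demo_inputs = np.random.rand(320, 6)
demo_targets = np.random.randint(0, 2, size=(320, 1))
best_net, chi2_histories = train_simultaneousNN(demo_inputs, demo_targets)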
Example #10
 print("\t Sigmoïd Output Activation with Binary Cross Entropy Loss (0)")
 print("\t Tanh Output Activation with Mean Square Error Loss (1)")
 choice = int(input("[DEFAULT = 1] : ") or "1")
 print("")
 
 train_input = torch.rand((1000,2))
 train_target = torch.rand((1000,1))
 test_input = torch.rand((1000,2))
 test_target = torch.rand((1000,1))
 
 if choice == 0:
     epochs = int(input("Please input how many epochs you want to train over [DEFAULT = 6000] : ") or "6000")
     print("")
     criterion = loss.LossBCE()
     model = sequential.Sequential(
         layer.Linear(2, 25),
         activation.ReLU(),
         layer.Linear(25, 50),
         activation.ReLU(),
         layer.Linear(50, 50),
         activation.ReLU(),
         layer.Linear(50, 25),
         activation.ReLU(),
         layer.Linear(25, 1),
         activation.Sigmoid()
     )
     train_target[((train_input-0.5)**2).sum(1) < 1/(2*math.pi)] = 0
     train_target[((train_input-0.5)**2).sum(1) >= 1/(2*math.pi)] = 1
     test_target[((test_input-0.5)**2).sum(1) < 1/(2*math.pi)] = 0
     test_target[((test_input-0.5)**2).sum(1) >= 1/(2*math.pi)] = 1
     ps = model.parameters()
Example #11
'''Seed'''
np.random.seed(1)
'''size of the training set and the testing set '''
train_size = 3000
test_size = 1500
'''Maximal number of epochs '''
my_num_epochs = 500
'''size of the batch'''
my_batch_size = 100
''' learning rate'''
my_lr = 0.001

my_initial_lr = 0.1
my_decay_coeff = 1 / 200
'''Construction of the neural network '''
my_layer1 = Layer.Linear(6, 5)
my_layer2 = ActivationFunctions.Tanh()
my_layer3 = Layer.Linear(5, 4)
my_layer4 = ActivationFunctions.Tanh()
my_layer5 = Layer.Linear(4, 3)
my_layer6 = ActivationFunctions.Tanh()
my_layer7 = Layer.Linear(3, 2)
my_layer8 = ActivationFunctions.Tanh()
my_layer9 = Layer.Linear(2, 1)
my_layer10 = ActivationFunctions.Sigmoid()
my_NN = Neural_network.NeuralNet([
    my_layer1, my_layer2, my_layer3, my_layer4, my_layer5, my_layer6,
    my_layer7, my_layer8, my_layer9, my_layer10
])

## Importation of the training and testing data
import Neural_Network_Library.loss as Loss
import Neural_Network_Library.layer as Layer
import Neural_Network_Library.error_round as Error_round
import Neural_Network_Library.activation_functions as ActivationFunctions
import Neural_Network_Library.neural_network as Neural_network
import Neural_Network_Library.optimizer as OptimizerClass
import Neural_Network_Library.user as User
'''

plt.close()

# Parameters' choice
'''seed '''
np.random.seed(1)
'''Construction of the neural network '''
my_layer1 = Layer.Linear(6, 4)
my_layer2 = ActivationFunctions.Tanh()
my_layer5 = Layer.Linear(4, 2)
my_layer6 = ActivationFunctions.Tanh()
my_layer3 = Layer.Linear(2, 1)
my_layer4 = ActivationFunctions.Sigmoid()
my_NN = Neural_network.NeuralNet(
    [my_layer1, my_layer2, my_layer5, my_layer6, my_layer3, my_layer4])
'''Maximal number of epochs '''
Nmax = 50000
'''size of the training set and the testing set '''
train_size = 3000
test_size = 1500
'''size of the batch'''
my_batch_size = 100
''' learning rate'''
Example #13
                        height=a_max_sent_size + 2 * (filter_width - 1),
                        width=overlap_ndim + word_dim)
    non_line = layer.Activation(tf.tanh, nkernals=kernals)
    max_pool = layer.Maxpool(
        ksize=[1, a_max_sent_size + filter_width - 1, 1, 1])
    flatten = layer.Flatten(batch_size=batch_size)
    net_a = layer.FeedForwardNet(
        [lookup_words, conv, non_line, max_pool, flatten])
    net_a.set_input([x_a, x_a_overlap])

with tf.name_scope('pair_combine'):
    pair_combine = layer.PairCombine(shape1=kernals, shape2=kernals)
    pair_combine.set_input([net_q.output, net_a.output])

with tf.name_scope('hiddent_layer'):
    hidden_layer = layer.Linear(n_in=2 * kernals + 1, n_out=2 * kernals + 1)
    hidden_layer.set_input(pair_combine.output)

with tf.name_scope('LR'):
    lr_layer = layer.LR(n_in=2 * kernals + 1, n_out=n_class)
    lr_layer.set_input(hidden_layer.output)

#Save Graph
tf.add_to_collection('x_q', x_q)
tf.add_to_collection('x_q_overlap', x_q_overlap)
tf.add_to_collection('x_a', x_a)
tf.add_to_collection('x_a_overlap', x_a_overlap)
tf.add_to_collection('pred', lr_layer.output)

init = tf.global_variables_initializer()