Example #1
def train(net: NeuralNetwork,
          inputs: Tensor,
          targets: Tensor,
          num_epochs: int = 5000,
          iterator: DataIterator = BatchIterator(),
          loss: Loss = CrossEntropy(),
          optimizer: Optimizer = MBGD(),
          showGraph: bool = False) -> None:
    losses = []
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for batch in iterator(inputs, targets):
            # This variant steps the optimizer once per sample, not once per batch.
            for X, Y in zip(batch.inputs, batch.targets):
                predicted = net.forward(X)
                epoch_loss += loss.loss(predicted, Y)
                grad = loss.grad(predicted, Y)
                net.backward(grad)
                optimizer.step(net)

        print(epoch, epoch_loss)
        losses.append(epoch_loss)
    if showGraph:
        plt.plot(losses)
        plt.show()
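A minimal usage sketch for this variant. The package paths and layer classes below are assumptions for illustration, and train itself expects matplotlib.pyplot to be imported as plt in its module:

import numpy as np

# Hypothetical imports; substitute the package this snippet comes from.
from mynet.nn import NeuralNetwork
from mynet.layers import Linear, Sigmoid

# Tiny XOR dataset with one-hot targets, matching the CrossEntropy default.
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
targets = np.array([[1, 0], [0, 1], [0, 1], [1, 0]], dtype=float)

net = NeuralNetwork([Linear(2, 4), Sigmoid(), Linear(4, 2), Sigmoid()])
train(net, inputs, targets, num_epochs=1000, showGraph=True)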
Example #2
def train_nn(
        net: NeuralNet,
        inputs: Tensor,
        targets: Tensor,
        epochs: int = 1,
        loss: Loss = MSE(),
        batch_iter: DataIterator = BatchIterator(),
        optimizer: Optimizer = SGD(),
) -> None:

    for epoch in range(epochs):
        epoch_loss = 0.

        for batch in batch_iter(inputs, targets):

            pred = net.forward(batch.input)

            batch_loss = loss.loss(pred, batch.target)
            epoch_loss += batch_loss

            loss_grad = loss.grad(pred, batch.target)

            net.backward(loss_grad)  # propagate the gradient back through the network

            optimizer.step(net)

        print(f'epoch:{epoch}, loss:{epoch_loss}')
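A hedged usage sketch for train_nn; the module paths and Linear layer are assumptions, and the Batch objects yielded by BatchIterator are presumed to carry the input/target attributes used above:

import numpy as np

from mynet.nn import NeuralNet      # hypothetical module paths
from mynet.layers import Linear

# Fit y = 2x on toy data with the default MSE loss and SGD optimizer.
X = np.linspace(0, 1, 100).reshape(-1, 1)
y = 2 * X

net = NeuralNet([Linear(1, 1)])
train_nn(net, X, y, epochs=200)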
Example #3
def train_prediction(
        net: Neural_network.NeuralNet,
        inputs_train: Tensor,
        targets_train: Tensor,
        inputs_test: Tensor,
        targets_test: Tensor,
        loss: Loss.Loss = Loss.MeanSquareError(),
        optimizer: OptimizerClass.Optimizer = OptimizerClass.SGD(),
        num_epochs: int = 5000,
        batch_size: int = 32):
    records = []  # per-epoch metrics, assembled into a DataFrame at the end
    size_training = inputs_train.shape[0]
    for epoch in range(num_epochs):
        Chi2_train = 0.0
        error_round_train = 0.0
        nbr_batch = 0

        for i in range(0, size_training, batch_size):
            nbr_batch += 1

            # 1) feed forward
            y_actual = net.forward(inputs_train[i:i + batch_size])

            # 2) compute the loss and the gradients
            Chi2_train += loss.loss(targets_train[i:i + batch_size], y_actual)
            grad_ini = loss.grad(targets_train[i:i + batch_size], y_actual)

            # 3) feed backward
            net.backward(grad_ini)

            # 4) update the net
            optimizer.step(net, n_epoch=epoch)

            error_round_train += Error_round.error_round(
                targets_train[i:i + batch_size], y_actual)

        Chi2_train = Chi2_train / nbr_batch
        error_round_train = error_round_train / nbr_batch

        y_actual_test = net.forward(inputs_test)
        Chi2_test = loss.loss(targets_test, y_actual_test)
        error_round_test = Error_round.error_round(targets_test, y_actual_test)

        if epoch % 100 == 0:
            print('epoch : ' + str(epoch) + "/" + str(num_epochs) + "\r",
                  end="")

        records.append({
            'MSE_train': Chi2_train,
            'MSE_test': Chi2_test,
            'error_round_train': error_round_train,
            'error_round_test': error_round_test
        })

    os.chdir(path_ini)  # path_ini is assumed to be defined elsewhere in the module
    # DataFrame.append was removed in pandas 2.0; build the frame once instead.
    Data = pd.DataFrame(records)
    Data.to_csv('Opt_num_epoch_backup.csv', index=False)

    return Data
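A usage sketch for train_prediction under the same repository's assumed modules (Neural_network, Layer, ActivationFunctions); the synthetic data and 80/20 split are illustrative:

import numpy as np

layers = [Layer.Linear(6, 4), ActivationFunctions.Tanh(),
          Layer.Linear(4, 1), ActivationFunctions.Sigmoid()]
net = Neural_network.NeuralNet(layers)

X = np.random.rand(1000, 6)
y = (X.sum(axis=1, keepdims=True) > 3.0).astype(float)

metrics = train_prediction(net, X[:800], y[:800], X[800:], y[800:],
                           num_epochs=500, batch_size=32)
print(metrics.tail())  # MSE and rounded-error curves for train and test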
Example #4
def train(net: NeuralNet,
          inputs: Tensor,
          targets: Tensor,
          num_epochs: int = 5000,
          iterator: DataIterator = BatchIterator(),
          loss: Loss = MSE(),
          optimizer: Optimizer = SGD()
          ) -> None:
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for batch in iterator(inputs, targets):
            predicted = net.forward(batch.inputs)
            epoch_loss += loss.loss(predicted, batch.targets)
            grad = loss.grad(predicted, batch.targets)
            net.backward(grad)
            optimizer.step(net)
        print(epoch, epoch_loss)
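This is the most compact variant; a classic XOR check, again with hypothetical module paths mirroring the names in the snippet:

import numpy as np

from mynet.nn import NeuralNet
from mynet.layers import Linear, Tanh

# XOR is not linearly separable, so the hidden Tanh layer is essential.
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
targets = np.array([[1, 0], [0, 1], [0, 1], [1, 0]], dtype=float)

net = NeuralNet([Linear(2, 2), Tanh(), Linear(2, 2)])
train(net, inputs, targets)
print(net.forward(inputs))  # should approximate the one-hot targets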
Example #5
def train(net: NetWork,
          inputs: Tensor,
          targets: Tensor,
          epochs: int = 500,
          loss: Loss = MSE(),
          optimizer: Optimizer = SGD(),
          iterator: DataIterator = BatchIterator(),
          show_info: bool = False):
    for epoch in range(epochs):
        epoch_loss = 0.0
        # This iterator yields (inputs, targets) tuples rather than Batch objects.
        for batch_inputs, batch_targets in iterator(inputs, targets):
            predictions = net.forward(batch_inputs)
            epoch_loss += loss.loss(predictions, batch_targets)
            grad = loss.grad(predictions, batch_targets)
            net.backward(grad)
            optimizer.step(net)
        if show_info:
            print('epoch:{},  loss:{}'.format(epoch, epoch_loss))
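Usage differs from Example #4 mainly in the show_info flag and the tuple-yielding iterator; a short sketch with hypothetical paths:

import numpy as np

from mynet.network import NetWork
from mynet.layers import Linear, Tanh

inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
targets = np.array([[0], [1], [1], [0]], dtype=float)

net = NetWork([Linear(2, 2), Tanh(), Linear(2, 1)])
train(net, inputs, targets, epochs=500, show_info=True)  # one loss line per epoch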
Example #6
def train_simultaneousNN(
        inputs_train: Tensor,
        targets_train: Tensor,
        loss: Loss.Loss = Loss.MeanSquareError(),
        optimizer: OptimizerClass.Optimizer = OptimizerClass.SGD(),
        num_epochs: int = 5000,
        batch_size: int = 32) -> tuple:

    size_training = inputs_train.shape[0]
    Result_chi2 = [[] for _ in range(9)]  # one chi2 history per network
    # Pruning checkpoints at 10%, 15%, ..., 45% of training, kept as integer
    # epochs so the `epoch in list_epoch` test below cannot miss on float rounding.
    list_epoch = np.arange(10, 50, 5) * num_epochs // 100
    '''Initialise the 9 NNs'''  # TODO: check the seed() question
    list_net = []
    for i in range(9):
        layers = []
        layers.append(Layer.Linear(6, 4))
        layers.append(ActivationFunctions.Tanh())
        layers.append(Layer.Linear(4, 2))
        layers.append(ActivationFunctions.Tanh())
        layers.append(Layer.Linear(2, 1))
        layers.append(ActivationFunctions.Sigmoid())
        list_net.append(Neural_network.NeuralNet(layers))

    destroyed_NN = []
    nbr_batch = size_training // batch_size
    '''Train the 9 NNs'''
    for epoch in range(num_epochs):

        for k in range(9):
            if k not in destroyed_NN:
                Chi2_train = 0

                for i in range(0, size_training, batch_size):

                    # 1) feed forward
                    y_actual = list_net[k].forward(
                        inputs_train[i:i + batch_size])

                    # 2) compute the loss and the gradients
                    Chi2_train += loss.loss(targets_train[i:i + batch_size],
                                            y_actual)
                    grad_ini = loss.grad(targets_train[i:i + batch_size],
                                         y_actual)

                    # 3) feed backward
                    list_net[k].backward(grad_ini)

                    # 4) update the net
                    optimizer.step(list_net[k], n_epoch=epoch)

                Chi2_train = Chi2_train / nbr_batch
                Result_chi2[k].append(Chi2_train)
        '''Remove the least efficient NN'''
        if epoch in list_epoch:
            Comparaison = [[], []]
            for k in range(9):
                if k not in destroyed_NN:
                    # Slope of the chi2 curve over the last ~50 epochs.
                    ErrorSlope = np.polyfit(np.array(range(epoch - 49, epoch)),
                                            Result_chi2[k][-50:-1], 1)[0]
                    # Weight the current error by its trend: a falling curve
                    # (negative slope) shrinks the score, a rising one inflates it.
                    MixedError = Result_chi2[k][-1] * (
                        1 - np.arctan(ErrorSlope) / (np.pi / 2))
                    Comparaison[0].append(k)
                    Comparaison[1].append(MixedError)

            # Prune the network with the worst (largest) mixed error.
            k = Comparaison[0][Comparaison[1].index(max(Comparaison[1]))]
            destroyed_NN.append(k)

        if epoch % 100 == 0:
            print('epoch : ' + str(epoch) + "/" + str(num_epochs) + "\r",
                  end="")

    # Exactly one network survives the pruning schedule above.
    for k in range(9):
        if k not in destroyed_NN:
            my_NN = list_net[k]
    return my_NN, Result_chi2
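A usage sketch for train_simultaneousNN; the function builds its own nine fixed-architecture networks, so only training data is needed. The synthetic set below has the 6 input features the first Linear layer expects:

import numpy as np

X = np.random.rand(640, 6)
y = (X.mean(axis=1, keepdims=True) > 0.5).astype(float)

best_net, chi2_histories = train_simultaneousNN(X, y, num_epochs=5000)
# chi2_histories[k] traces the per-epoch training chi2 of network k until
# it is pruned; best_net is the lone survivor of the tournament.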