def train_prediction(
        net: Neural_network.NeuralNet,
        inputs_train: Tensor,
        targets_train: Tensor,
        inputs_test: Tensor,
        targets_test: Tensor,
        loss: Loss.Loss = Loss.MeanSquareError(),
        optimizer: OptimizerClass.Optimizer = OptimizerClass.SGD(),
        num_epochs: int = 5000,
        batch_size: int = 32):
    Data = pd.DataFrame(columns=('MSE_train', 'MSE_test', 'error_round_train',
                                 'error_round_test'))
    size_training = inputs_train.shape[0]
    for epoch in range(num_epochs):
        Chi2_train = 0.0
        error_round_train = 0.0
        nbr_batch = 0

        for i in range(0, size_training, batch_size):
            nbr_batch += 1

            # 1) feed forward
            y_actual = net.forward(inputs_train[i:i + batch_size])

            # 2) compute the loss and the gradients
            Chi2_train += loss.loss(targets_train[i:i + batch_size], y_actual)
            grad_ini = loss.grad(targets_train[i:i + batch_size], y_actual)

            # 3) feed backwards
            grad_fini = net.backward(grad_ini)

            # 4) update the net
            optimizer.step(net, n_epoch=epoch)

            error_round_train += Error_round.error_round(
                targets_train[i:i + batch_size], y_actual)

        Chi2_train = Chi2_train / nbr_batch
        error_round_train = error_round_train / nbr_batch

        y_actual_test = net.forward(inputs_test)
        Chi2_test = loss.loss(targets_test, y_actual_test)
        error_round_test = Error_round.error_round(targets_test, y_actual_test)

        if epoch % 100 == 0:
            print('epoch : ' + str(epoch) + "/" + str(num_epochs) + "\r",
                  end="")

        datanew = pd.DataFrame({
            'MSE_train': [Chi2_train],
            'MSE_test': [Chi2_test],
            'error_round_train': [error_round_train],
            'error_round_test': [error_round_test]
        })
        Data = pd.concat([Data, datanew], ignore_index=True)

    os.chdir(path_ini)
    Data.to_csv('Opt_num_epoch_backup.csv', index=False)

    return Data
Example #2
def train(config, exp_name, data_path, resume=False, tune=False):

    results_dir = os.path.join(data_path, exp_name)
    if os.path.exists(results_dir) and not (resume or tune):
        print("{} already exists, no need to train.\n".format(results_dir))
        return
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)
    json.dump(config,
              open(os.path.join(results_dir, 'config.json'), 'w'),
              sort_keys=True,
              separators=(',\n', ': '))

    is_dev = config['is_dev']
    print("\n***{} MODE***\n".format('DEV' if is_dev else 'TEST'))
    if not is_dev:
        print("\n***Changing TEST to DEV***\n")
        config['is_dev'] = True
    data_set = data_loader.read_dataset(data_path,
                                        results_dir,
                                        dev_mode=True,
                                        max_examples=float('inf'))
    cuda = torch.cuda.is_available()

    print("Number of training data points {}".format(len(data_set['train'])))
    print("Number of dev data points {}".format(len(data_set['test'])))
    # Provide train and dev data to negative sampler for filtering positives
    data = copy.copy(data_set['train'])
    data.extend(data_set['test'])
    model, neg_sampler, evaluator = build_model(data, config, results_dir,
                                                data_set['num_ents'],
                                                data_set['num_rels'])
    model = is_gpu(model, cuda)
    state = None
    if resume or tune:
        params_path = os.path.join(results_dir,
                                   '{}_params.pt'.format(config['model']))
        model.load_state_dict(torch.load(params_path))
    if resume:
        state_path = os.path.join(results_dir,
                                  '{}_optim_state.pt'.format(config['model']))
        state = torch.load(state_path)
    if config['neg_sampler'] == 'rl':
        sgd = optimizer.Reinforce(data_set['train'], data_set['dev'], model,
                                  neg_sampler, evaluator, results_dir, config,
                                  state)
    else:
        sgd = optimizer.SGD(data_set['train'], data_set['dev'], model,
                            neg_sampler, evaluator, results_dir, config, state)

    start = time.time()
    sgd.minimize()
    end = time.time()
    hours = int((end - start) / 3600)
    minutes = ((end - start) % 3600) / 60.
    profile_string = "Finished Training! Took {} hours and {} minutes\n".format(
        hours, minutes)
    with open(os.path.join(results_dir, 'train_time'), 'w') as f:
        f.write(profile_string + "Raw seconds {}\n".format(end - start))
    print(profile_string)
def build_optimizer_from_layer_parameters(layer_parameters: Dict):
    if _OPTIMIZER not in layer_parameters:
        _optimizer = optimiser.SGD()  # Use SGD() as default
    else:
        optimizer_spec = layer_parameters[_OPTIMIZER]
        assert \
            _SCHEME in optimizer_spec, \
            "Invalid optimizer specification %s" % optimizer_spec

        optimizer_scheme = optimizer_spec[_SCHEME].lower()
        assert optimizer_scheme in optimiser.SCHEMES, \
            "Invalid optimizer scheme %s. Scheme must be one of %s." \
            % (optimizer_scheme, optimiser.SCHEMES)

        _optimizer = \
            optimiser.SCHEMES[optimizer_scheme].build(optimizer_spec[_PARAMETERS]) \
            if _PARAMETERS in optimizer_spec else optimiser.SGD()
    return _optimizer
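
# Illustrative usage sketch for the builder above. Assumptions not shown in this
# snippet: "sgd" is a key registered in optimiser.SCHEMES and its build() accepts
# a {"lr": ...} dict; the module constants _OPTIMIZER, _SCHEME and _PARAMETERS are
# used as the dictionary keys so their actual values do not matter here.
example_layer_parameters = {
    _OPTIMIZER: {
        _SCHEME: "sgd",                 # must be one of optimiser.SCHEMES (assumed)
        _PARAMETERS: {"lr": 0.01},      # forwarded to SCHEMES["sgd"].build(...) (assumed)
    }
}
example_optimizer = build_optimizer_from_layer_parameters(example_layer_parameters)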
def Opt_learning_rate(list_learning_rate):
    for my_lr in list_learning_rate:
        Data = User.train(my_NN,
                          data_train_input,
                          data_train_target,
                          num_epochs=Nmax,
                          optimizer=OptimizerClass.SGD(lr=my_lr),
                          batch_size=my_batch_size)[1]
        plt.plot(range(Nmax), Data, label=str(my_lr))

    plt.xlabel('Epoch')
    plt.ylabel(' Training mean squared error')
    plt.legend(title='Learning rate impact')
    plt.show()
Example #5
    def __init__(self,
                 name: str,
                 num_nodes: int,
                 W: np.ndarray,
                 posteriors: Optional[List[Layer]] = None,
                 optimizer: optimiser.Optimizer = optimiser.SGD(),
                 log_level: int = logging.ERROR):
        """Initialize a matmul layer that has 'num_nodes' nodes
        Input X:(N,D) is a batch. D is number of features NOT including bias
        Weight W:(M, D+1) is the layer weight including bias weight.
        Args:
            name: Layer identity name
            num_nodes: Number of nodes in the layer
            W: Weight of shape(M=num_nodes, D+1). A row is a weight vector of a node.
            posteriors: Post layers to which forward the matmul layer output
            optimizer: Gradient descent implementation e.g SGD, Adam.
            log_level: logging level
        """
        super().__init__(name=name, num_nodes=num_nodes, log_level=log_level)

        # --------------------------------------------------------------------------------
        # W: weight matrix of shape(M,D) where M=num_nodes
        # Gradient dL/dW has the same shape (M, D) as W because L is scalar.
        #
        # WT is not cached because W is updated every cycle, which would require
        # updating WT as well: little performance gain and a risk of introducing bugs.
        # self._WT: np.ndarray = W.T          # transpose of W
        # --------------------------------------------------------------------------------
        assert W.shape[0] == num_nodes, \
            f"W shape needs to be ({num_nodes}, D) but {W.shape}."
        self._D = W.shape[1]  # number of features in x including bias
        self._W: np.ndarray = copy.deepcopy(W)  # node weight vectors
        self._dW: np.ndarray = np.empty(0, dtype=TYPE_FLOAT)

        # --------------------------------------------------------------------------------
        # State of the layer
        # --------------------------------------------------------------------------------
        self._S = {}

        self.logger.debug(
            "Matmul[%s] W.shape is [%s], number of nodes is [%s]", name,
            W.shape, num_nodes)
        # --------------------------------------------------------------------------------
        # Optimizer for gradient descent
        # Z(n+1) = optimiser.update(Z(n), dL/dZ(n) + regularization)
        # --------------------------------------------------------------------------------
        assert isinstance(optimizer, optimiser.Optimizer)
        self._optimizer: optimiser.Optimizer = optimizer
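
# A minimal construction sketch (hypothetical: assumes this __init__ belongs to a
# class named Matmul, as the log message suggests, and that TYPE_FLOAT is the
# module's float dtype).
M, D = 3, 4                                             # 3 nodes, 4 features (bias excluded)
W_init = np.random.randn(M, D + 1).astype(TYPE_FLOAT)   # one weight row per node, +1 bias column
matmul_layer = Matmul(name="matmul01", num_nodes=M, W=W_init, optimizer=optimiser.SGD())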
Example #6
    def __init__(self, X_train, Y_train, Net='LeNet5', opti='SGDMomentum'):
        # Prepare Data: Load, Shuffle, Normalization, Batching, Preprocessing
        self.X_train = X_train
        self.Y_train = Y_train

        self.batch_size = 64
        # D_in: input depth of network, 784, 28*28 input grayscale image
        self.D_in = 784
        # D_out: output depth of network = 10, the 10 digits
        self.D_out = 10

        print('  Net: ' + str(Net))
        print('  batch_size: ' + str(self.batch_size))
        print('  D_in: ' + str(self.D_in))
        print('  D_out: ' + str(self.D_out))
        print('  Optimizer: ' + opti)

        # =======================
        if Net == 'TwoLayerNet':
            # H is the size of the one hidden layer.
            H = 400
            self.model = ANN.TwoLayerNet(self.D_in, H, self.D_out)
        elif Net == 'ThreeLayerNet':
            #######################################
            ############  TODO   ##################
            #######################################
            # H1, H2 are the size of the two hidden layers.
            #self.model = ANN.ThreeLayerNet (self.D_in, H1, H2, self.D_out)
            print('Not Implemented.')
            exit(0)
        elif Net == 'LeNet5':
            self.model = CNN.LeNet5()

        # store training loss over iterations, for later visualization
        self.losses = []

        if opti == 'SGD':
            self.opti = optimizer.SGD(self.model.get_params(),
                                      lr=0.0001,
                                      reg=0)
        else:
            self.opti = optimizer.SGDMomentum(self.model.get_params(),
                                              lr=0.0001,
                                              momentum=0.80,
                                              reg=0.00003)

        self.criterion = loss.CrossEntropyLoss()
Example #7
def train(net: Neural_network.NeuralNet,
          inputs: Tensor,
          targets: Tensor,
          loss: Loss = Loss.MeanSquareError(),
          optimizer: OptimizerClass.Optimizer = OptimizerClass.SGD(),
          num_epochs: int = 5000,
          batch_size: int = 32) -> tuple:
    chi2_list = []
    round_error_list = []
    size_training = inputs.shape[0]
    for epoch in range(num_epochs):
        chi2_loss = 0.0
        round_error_loss = 0.0
        nbr_batch = 0

        for i in range(0, size_training, batch_size):
            nbr_batch += 1

            # 1) Feed forward
            y_actual = net.forward(inputs[i:i + batch_size])

            # 2) Compute the loss and the gradient
            chi2_loss += loss.loss(targets[i:i + batch_size], y_actual)
            round_error_loss += Error_round.error_round(
                targets[i:i + batch_size], y_actual)
            grad_ini = loss.grad(targets[i:i + batch_size], y_actual)

            # 3) Feed backwards
            grad_fini = net.backward(grad_ini)

            # 4) Update the net
            optimizer.step(net, n_epoch=epoch)

        chi2_loss = chi2_loss / nbr_batch
        round_error_loss = round_error_loss / nbr_batch
        chi2_list.append(chi2_loss)
        round_error_list.append(round_error_loss)

        # Print status every 50 iterations
        if epoch % 50 == 0:
            print('\r epoch : ' + str(epoch) + "/" + str(num_epochs) +
                  ", training mean squared error : " + str(chi2_loss) + "\r",
                  end="")
    print('epoch : ' + str(epoch) + "/" + str(num_epochs) +
          ", training final mean squared error : " + str(chi2_loss) + '\n')

    return chi2_list, round_error_list
Example #8
    def backward(self, actual):
        """ Backward propagation

        Args:
            actual(np.ndarray): actual y's, the right labels

        """
        # for every layer reversed we backpropagate
        error = None
        # Pick a random index i to extract the i-th sample from the tensor
        # for stochastic gradient descent
        i = random.randint(0, actual.shape[0] - 1)
        for layer in reversed(self.layers):
            error = layer.backward(i, error, actual, self.loss)
            # Updating weights
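            # Note: a new optimizer.SGD instance is built for each layer on every
            # backward pass, so any internal optimizer state does not persist
            # between updates.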
            optim = optimizer.SGD(layer, self.learning_rate)
            optim.step()
Example #9
def train_model(model, train_input, train_target, loss_fnc, nb_epochs, lr):
    '''
    Supervised machine learning model training.
    '''
    print("-------------------- Training --------------------")
    optimizer = optim.SGD(model.param(), lr)
    mini_batch_size = 100
    base = int(nb_epochs / 10)
    model.initParameters()
    l = []
    for e in range(nb_epochs + 1):
        for b in range(0, train_input.size(0), mini_batch_size):
            output = model(train_input.narrow(0, b, mini_batch_size))
            loss = loss_fnc(output, train_target.narrow(0, b, mini_batch_size))
            model.zero_grad()
            loss.backward()
            optimizer.step()
        if e % base == 0:
            print('Epochs', e, ': Loss ', loss.loss.item())
            l.append(loss.loss.item())
    print("--------------------------------------------------\n")
    return l
def Opt_nbr_epoch():
    '''
    Evolution of the chi2 and of our special round error on the training and testing sets as a function of the number of epochs.
    '''
    Data = train_prediction(my_NN,
                            data_train_input,
                            data_train_target,
                            data_test_input,
                            data_test_target,
                            num_epochs=Nmax,
                            optimizer=OptimizerClass.SGD(lr=my_lr),
                            batch_size=my_batch_size)
    print(Data)
    plt.plot(range(Nmax), Data['MSE_train'], label='training')
    plt.plot(range(Nmax), Data['MSE_test'], label='testing')

    plt.xlabel('Epoch')
    plt.ylabel(r'Mean Squared Error')
    plt.legend()
    plt.title('Learning Curve')

    plt.show()
Example #11
    def __init__(self, const):
        self.const = const
        lastLayer = layer.InputLayer()
        for layerInfo in const:
            if layerInfo['type'] == 'Aff':
                if layerInfo['opt'] == 'Adam':
                    opt = op.Adam(layerInfo['size'])
                elif layerInfo['opt'] == 'SGD':
                    opt = op.SGD()

                lastLayer = layer.AffineLayer(layerInfo['size'], lastLayer, opt)
            
            elif layerInfo['type'] == 'Act':
                if layerInfo['func'] == 'ReLU':
                    act = ac.ReLU()
                elif layerInfo['func'] == 'Sigmoid':
                    act = ac.Sigmoid()
                elif layerInfo['func'] == 'Softmax':
                    act = ac.Softmax()

                lastLayer = layer.ActLayer(lastLayer.outNode, lastLayer, act)

        self.outputLayer = lastLayer
Example #12
        if epoch % 100 == 0:
            print('epoch : ' + str(epoch) + "/" + str(num_epochs) + "\r",
                  end="")

    for k in range(9):
        if k not in destroyed_NN:
            my_NN = list_net[k]
    return my_NN, Result_chi2


## User's function

Results = train_simultaneousNN(data_train_input,
                               data_train_target,
                               num_epochs=Nmax,
                               optimizer=OptimizerClass.SGD(lr=my_lr),
                               batch_size=my_batch_size)

for k in range(9):
    Y = Results[1][k]
    X = range(len(Y))
    plt.plot(X, Y)
plt.ylabel('Mean Squared Error')
plt.xlabel('Epoch')
plt.title('Simultaneous NN : training curve')
'''testing '''
data_test_prediction = User.prediction(Results[0], data_test_input)

error = Error_round.error_round(data_test_prediction, data_test_target)
print('% of false error for testing set : ', error)
'''histogram of predictions'''
Example #13
    (x_train, y_train), (x_test, y_test) = load_mnist(
        normalize=True, flatten=True, one_hot_label=False)
    if flag == "train":
        return x_train, y_train
    elif flag == "test":
        return x_test, y_test
    else:
        print("data type error!!!")


batch_size = 100  # batch size
lr = 0.1
EPOCH = 100

net = simpleFC(784, 50, 10)
optim = optimizer.SGD(lr=lr)
x_train, y_train = get_data(flag="train")
train_size = x_train.shape[0]
x_test, y_test = get_data(flag="test")

train_loss = []
train_acc = []
test_loss = []
test_acc = []

for epoch in tqdm(range(EPOCH)):
    # train
    for i in range(0, len(x_train), batch_size):
        batch_mask = np.random.choice(train_size, batch_size)
        x = x_train[batch_mask]
        y_ = y_train[batch_mask]
Example #14
X_train -= np.mean(X_train)
X_test -= np.mean(X_test)
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)

#X_train = X_train[:6000]
#Y_train = Y_train[:6000]
#X_test = X_test[:1000]
#Y_test = Y_test[:1000]

batch_size = 16
D_out = 10

model = nn.LeNet5()
losses = []
optim = optimizer.SGD(model.get_params(), lr=0.00003)
#optim = optimizer.SGDMomentum(model.get_params(), lr=0.00003, momentum=0.80, reg=0.0003)
criterion = loss.SoftmaxLoss()

# Train
ITER = 30000
for i in range(ITER):
    # get batch, make onehot
    X_batch, Y_batch = util.get_batch(X_train, Y_train, batch_size)
    Y_batch = util.MakeOneHot(Y_batch, D_out)

    # forward, loss, backward, step
    Y_pred = model.forward(X_batch)
    loss, dout = criterion.get(Y_pred, Y_batch)
    model.backward(dout)
    optim.step()
Example #15
    print('\tOptim parameters:\t{}'.format(optim_params))
    print('\tModel parameters:\t{}'.format(fit_params))

    ### Init model, loss and optimizer
    model = model.Model(input_size=X.shape[1],
                        init_weights=fit_params['init_weights'])
    my_loss = loss.LogisticLoss(reg_coeff=loss_params['weight_decay'])
    # Choose optimizer:
    if optim_params['type'] == 'gd':
        optim = optimizer.GD(params=model.weights,
                             loss=my_loss,
                             learn_rate=optim_params['lr'],
                             tollerance=optim_params['tollerance'])
    elif optim_params['type'] == 'sgd':
        optim = optimizer.SGD(params=model.weights,
                              loss=my_loss,
                              learn_rate=optim_params['lr'],
                              tollerance=optim_params['tollerance'])
    elif optim_params['type'] == 'svrg':
        optim = optimizer.SVRG(params=model.weights,
                               loss=my_loss,
                               learn_rate=optim_params['lr'],
                               tollerance=optim_params['tollerance'],
                               iter_epoch=optim_params['iter_epoch'])
    else:
        raise NotImplementedError

    ### Shape info
    print('-Displaying shape of data and variable')
    print('\tInput: {}'.format(X.shape))
    print('\tTarget: {}'.format(y.shape))
    print('\tWeights (with bias): {}'.format(model.weights.shape))
Example #16
         activation.ReLU(),
         layer.Linear(25, 50),
         activation.ReLU(),
         layer.Linear(50, 50),
         activation.ReLU(),
         layer.Linear(50, 25),
         activation.ReLU(),
         layer.Linear(25, 1),
         activation.Sigmoid()
     )
     train_target[((train_input-0.5)**2).sum(1) < 1/(2*math.pi)] = 0
     train_target[((train_input-0.5)**2).sum(1) >= 1/(2*math.pi)] = 1
     test_target[((test_input-0.5)**2).sum(1) < 1/(2*math.pi)] = 0
     test_target[((test_input-0.5)**2).sum(1) >= 1/(2*math.pi)] = 1
     ps = model.parameters()
     optim = optimizer.SGD(model.parameters(), lr=0.05, decay=500)
     #for plotting later
     levels = [0, 0.25, 0.5, 0.75, 1]
     #for accuracy computation
     sigmoid = True
  
 else:
     epochs = int(input("Please input how many epochs you want to train over [DEFAULT = 4000] : ") or "4000")
     print("")
     criterion = loss.LossMSE()
     model = sequential.Sequential(
                 layer.Linear(2, 25),
                 activation.ReLU(),
                 layer.Linear(25, 50),
                 activation.ReLU(),
                 layer.Linear(50, 50),
Example #17
def run(args=None):
    usage = "usage : %prog [options]"
    parser = optparse.OptionParser(usage = usage)

    parser.add_option("--test", action = "store_true", dest = "test", default = False)

    # Paramsfile includes hyperparameters for training
    parser.add_option('--params_file', dest = "params_file", default = './params/exp_params.json',
                      help = "Path to the file  containing the training settings")
    parser.add_option('--data_dir', dest = "data_dir", default = './trees',
                      help = "Directory containing the trees")

    # Directory containing the model to test
    parser.add_option("--model_directory", dest = "test_dir", type = "string")
    parser.add_option("--data", dest = "data", type = "string", default = "train")

    (opts, args) = parser.parse_args(args)

    results_dir = "./results"
    if opts.test:
        pass
    else:
        results_dir_current_job = os.path.join(results_dir, utils.now_as_str_f())
        while os.path.isdir(results_dir_current_job):  # generate a new timestamp if the current one already exists
            results_dir_current_job = os.path.join(results_dir, utils.now_as_str_f())
        os.makedirs(results_dir_current_job)

    # Load training settings (e.g. hyperparameters)
    params = utils.Params(opts.params_file)

    if opts.test:
        pass
    else:
        # Copy the settings file into the results directory
        copyfile(opts.params_file, os.path.join(results_dir_current_job, os.path.basename(opts.params_file)))

    # Get the logger
    if opts.test:
        log_path = os.path.join(opts.test_dir, 'testing.log')
    else:
        log_path = os.path.join(results_dir_current_job, 'training.log')
    log_level = params.log_level if hasattr(params, 'log_level') else logging.DEBUG
    log = utils.get_logger(log_path, log_level)

    if opts.test:
        log.info("Testing directory: " + opts.test_dir)
        log.info("Dataset used for testing: " + opts.data)
    else:
        log.info("Results directory: " + results_dir_current_job)
        log.info("Minibatch: " + str(params.optimizer_settings['minibatch']))
        log.info("Optimizer: " + params.optimizer)
        log.info("Epsilon: " + str(params.optimizer_settings['epsilon']))
        log.info("Alpha: " + str(params.optimizer_settings['alpha']))
        log.info("Number of samples used: " + str(params.sample_size))

    # Testing
    if opts.test:
        test(opts.test_dir, opts.data)
        return

    log.info("Loading data...")
    # load training data
    trees = tr.loadTrees(sample_size = params.sample_size)
    params.numWords = len(tr.loadWordMap())
    overall_performance = pd.DataFrame()

    rnn = nnet.RNN(params.wvecDim, params.outputDim, params.numWords, params.optimizer_settings['minibatch'])
    rnn.initParams()

    sgd = optimizer.SGD(rnn, alpha = params.optimizer_settings['alpha'],
                        minibatch = params.optimizer_settings['minibatch'],
                        optimizer = params.optimizer, epsilon = params.optimizer_settings['epsilon'])

    best_val_cost = float('inf')
    best_epoch = 0

    for e in range(params.num_epochs):
        start = time.time()
        log.info("Running epoch %d" % e)
        df, updated_model, train_cost, train_acc = sgd.run(trees)
        end = time.time()
        log.info("Time per epoch : %f" % (end - start))
        log.info("Training accuracy : %f" % train_acc)
        # VALIDATION
        val_df, val_cost, val_acc = validate(updated_model, results_dir_current_job)

        if val_cost < best_val_cost:
            # best validation cost we have seen so far
            log.info("Validation score improved, saving model")
            best_val_cost = val_cost
            best_epoch = e
            best_epoch_row = {"epoch": e, "train_cost": train_cost, "val_cost": val_cost, "train_acc": train_acc,
                              "val_acc": val_acc}
            with open(results_dir_current_job + "/checkpoint.bin", 'wb') as fid:
                pickle.dump(params, fid)
                pickle.dump(sgd.costt, fid)
                rnn.toFile(fid)

        val_df.to_csv(results_dir_current_job + "/validation_preds_epoch_" + str(e) + ".csv", header = True, index = False)
        df.to_csv(results_dir_current_job + "/training_preds_epoch_" + str(e) + ".csv", header = True, index = False)

        row = {"epoch": e, "train_cost": train_cost, "val_cost": val_cost, "train_acc": train_acc, "val_acc": val_acc}
        overall_performance = pd.concat([overall_performance, pd.DataFrame([row])], ignore_index = True)

        # break if no val loss improvement in the last epochs
        if (e - best_epoch) >= params.num_epochs_early_stop:
            log.info("No improvement in the last {num_epochs_early_stop} epochs, stop training.".format(num_epochs_early_stop=params.num_epochs_early_stop))
            break

    overall_performance = pd.concat([overall_performance, pd.DataFrame([best_epoch_row])], ignore_index = True)
    overall_performance.to_csv(results_dir_current_job + "/train_val_costs.csv", header = True, index = False)
    log.info("Experiment end")
Example #18
    toll = 0.001
    epochs = 40
    starting_point = sp = -5

    loss_fn = loss.LogisticLoss(reg_coeff=reg_coeff)
    W = np.arange(lims[0], lims[1], lims[2])
    losses = [
        np.round(loss_fn.compute_loss(X, y, np.array([[w]])), 3) for w in W
    ]

    gd = optimizer.GD(params=np.array([[sp]]),
                      loss=loss_fn,
                      learn_rate=lr,
                      tollerance=toll)
    sgd = optimizer.SGD(params=np.array([[sp]]),
                        loss=loss_fn,
                        learn_rate=lr,
                        tollerance=toll)
    #sag = optimizer.SAG(params=np.array([[sp]]), loss=loss_fn, learn_rate=lr, tollerance=toll)
    svrg = optimizer.SVRG(params=np.array([[sp]]),
                          loss=loss_fn,
                          learn_rate=lr,
                          tollerance=toll,
                          iter_epoch=10)
    results1 = gd.run(X, y, epochs)
    results2 = sgd.run(X, y, epochs // 3)
    results4 = svrg.run(X, y, epochs // 2)
    # print('\nComputed losses:\n\t{}\n'.format(losses))
    # print('Results:')
    # print('\tLosses: {}'.format(np.round(results['loss_list'], 3)))
    # print('\tParams: {}'.format([np.round(float(i[0]), 3) for i in results['params_list']]))
    # print('\tOutput: {}'.format(np.dot(X, results['params_list'][-1]).transpose()))
Example #19
    train_target[((train_input-0.5)**2).sum(1) < 1/(2*math.pi)] = -1
    train_target[((train_input-0.5)**2).sum(1) >= 1/(2*math.pi)] = 1

    test_input = torch.rand((1000,2))
    test_target = torch.rand((1000,1))
    test_target[((test_input-0.5)**2).sum(1) < 1/(2*math.pi)] = -1
    test_target[((test_input-0.5)**2).sum(1) >= 1/(2*math.pi)] = 1

    #Normalization
    mu, std = train_input.mean(0), train_input.std(0)
    train_input.sub_(mu).div_(std)
    test_input.sub_(mu).div_(std)

    epochs = 10000
    ps = model.parameters()
    optim = optimizer.SGD(model.parameters())
    for i in range(epochs):

        output = model(train_input)

        optim.zero_grad()
        gradwrrtxL = criterion.backward(output, train_target)

        model.backward(gradwrrtxL)

        optim.step()


        if i % 10 == 0:
            test_accuracyV = test_accuracy(model, test_input, test_target)
            print(criterion.forward(output, train_target), test_accuracyV, test_accuracy(model, train_input, train_target))
Example #20
            if i % 100 == 0:
                print(i)
            self.forw = inparr[i]
            for j in range(len(self.layers) - 1):
                self.forw = self.layers[j].forward(self.forw)
            self.out = self.layers[-1].forward(self.forw)
            self.arr.append(self.out)
        self.arr = np.array(self.arr)
        return self.arr


if __name__ == "__main__":
    layer1 = layerslib.Sigmoid(2, 2, True)
    layer2 = layerslib.Sigmoid(2, 1, True)
    layerarr = [layer1, layer2]
    opt = optimizer.SGD(0.5, 0.1, layerarr)
    model = Seqential(layerslib.MSE(), opt)

    inp = [[0, 0], [0, 1], [1, 0], [1, 1]]
    out = [0, 1, 1, 0]

    curloss = layerslib.MSE()

    for i in range(5000):
        for j in range(4):
            opt.zero_grad()
            ourout = model.forward(inp[j])
            c = curloss.loss(ourout, out[j])
            curloss.backward()
            opt.step()
Example #21
def test_nbr_neuron(list_test):
    color_list = ['r', 'g', 'b', 'k', 'm', 'c', 'y']
    color_list *= 3
    k = 0
    for i in list_test:
        my_layer1 = Layer.Linear(6, i)
        my_layer2 = ActivationFunctions.Tanh()
        my_layer5 = Layer.Linear(i, i)
        my_layer6 = ActivationFunctions.Tanh()
        my_layer3 = Layer.Linear(i, 1)
        my_layer4 = ActivationFunctions.Sigmoid()
        my_NN = Neural_network.NeuralNet(
            [my_layer1, my_layer2, my_layer5, my_layer6, my_layer3, my_layer4])

        chi2_list, error_list = User.train(my_NN,
                                           data_train_input,
                                           data_train_target,
                                           num_epochs=num_epoch_max,
                                           optimizer=Optimizer.SGD(lr=my_lr),
                                           batch_size=my_batch_size)

        data_test_prediction = User.prediction(my_NN, data_test_input)
        error_final = Error_round.error_round(data_test_prediction, data_test_target)

        plt.plot(range(num_epoch_max), error_list, label=str(i), c=color_list[k])
        plt.plot([num_epoch_max], [error_final], marker='o', c=color_list[k])
        plt.xlabel('Epoch')
        plt.ylabel('Training round error')

        k += 1
    plt.legend(title='Neurons')
    plt.title('Optimisation of the number of neurons')
    plt.show()
Example #22
def test_nbr_layer(list_test, n_neuron):
    color_list = ['r', 'g', 'b', 'k', 'm', 'c', 'y']
    color_list *= 3
    k = 0

    my_layerini1 = Layer.Linear(6, n_neuron)
    my_layerini2 = ActivationFunctions.Tanh()
    my_layerfini1 = Layer.Linear(n_neuron, 1)
    my_layerfini2 = ActivationFunctions.Sigmoid()

    for i in list_test:
        layers_new = [my_layerini1, my_layerini2]
        for j in range(i):
            layers_new += [Layer.Linear(n_neuron, n_neuron), ActivationFunctions.Tanh()]
        layers_new += [my_layerfini1, my_layerfini2]
        my_NN = Neural_network.NeuralNet(layers_new)

        chi2_list, error_list = User.train(my_NN,
                                           data_train_input,
                                           data_train_target,
                                           num_epochs=num_epoch_max,
                                           optimizer=Optimizer.SGD(lr=my_lr),
                                           batch_size=my_batch_size)
        data_test_prediction = User.prediction(my_NN, data_test_input)

        error_final = Error_round.error_round(data_test_prediction, data_test_target)

        plt.plot(range(num_epoch_max), error_list, label=str(i), c=color_list[k])
        plt.plot([num_epoch_max], [error_final], marker='o', c=color_list[k])
        plt.xlabel('Epoch')
        plt.ylabel('Training round error')

        k += 1
    plt.legend(title='Hidden layers')
    plt.title('Optimisation of the number of hidden layers')
    plt.show()
Example #23
def train_simultaneousNN(
        inputs_train: Tensor,
        targets_train: Tensor,
        loss: Loss.Loss = Loss.MeanSquareError(),
        optimizer: OptimizerClass.Optimizer = OptimizerClass.SGD(),
        num_epochs: int = 5000,
        batch_size: int = 32) -> tuple:

    size_training = inputs_train.shape[0]
    Result_chi2 = [[], [], [], [], [], [], [], [], []]
    list_epoch = np.array(range(10, 50, 5)) / 100 * num_epochs
    '''initialisation of the 9 NNs'''  # to check: seed() question
    list_net = []
    for i in range(9):
        layers = []
        layers.append(Layer.Linear(6, 4))
        layers.append(ActivationFunctions.Tanh())
        layers.append(Layer.Linear(4, 2))
        layers.append(ActivationFunctions.Tanh())
        layers.append(Layer.Linear(2, 1))
        layers.append(ActivationFunctions.Sigmoid())
        list_net.append(Neural_network.NeuralNet(layers))

    destroyed_NN = []
    nbr_batch = size_training // batch_size
    ''' training of the 9 NNs'''
    for epoch in range(num_epochs):

        for k in range(9):
            if k not in destroyed_NN:
                Chi2_train = 0

                for i in range(0, size_training, batch_size):

                    # 1) feed forward
                    y_actual = list_net[k].forward(inputs_train[i:i +
                                                                batch_size])

                    # 2) compute the loss and the gradients
                    Chi2_train += loss.loss(targets_train[i:i + batch_size],
                                            y_actual)
                    grad_ini = loss.grad(targets_train[i:i + batch_size],
                                         y_actual)

                    # 3) feed backwards
                    grad_fini = list_net[k].backward(grad_ini)

                    # 4) update the net
                    optimizer.step(list_net[k], n_epoch=epoch)

                Chi2_train = Chi2_train / nbr_batch
                Result_chi2[k].append(Chi2_train)
        '''Removal of the least effective NN'''
        if epoch in list_epoch:
            Comparaison = [[], []]
            for k in range(9):
                if k not in destroyed_NN:
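                    # ErrorSlope: slope of a linear fit through the last ~50 chi2 values.
                    # MixedError scales the latest chi2 by (1 - arctan(slope)/(pi/2));
                    # the network with the largest MixedError is removed below.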
                    ErrorSlope = np.polyfit(np.array(range(epoch - 49, epoch)),
                                            Result_chi2[k][-50:-1], 1)[0]
                    MixedError = Result_chi2[k][-1] * (1 -
                                                       np.arctan(ErrorSlope) /
                                                       (np.pi / 2))
                    Comparaison[0].append(k)
                    Comparaison[1].append(MixedError)

            k = Comparaison[0][Comparaison[1].index(max(Comparaison[1]))]
            destroyed_NN.append(k)

        if epoch % 100 == 0:
            print('epoch : ' + str(epoch) + "/" + str(num_epochs) + "\r",
                  end="")

    for k in range(9):
        if k not in destroyed_NN:
            my_NN = list_net[k]
    return my_NN, Result_chi2
Example #24
'''training set'''
data_train_input = np.array(Data_train[param][:train_size])
data_train_target = np.array(Data_train[['isSignal']][:train_size])
'''testing set'''
data_test_input = np.array(Data_test[param][:test_size])
data_test_target = np.array(Data_test[['isSignal']][:test_size])

## Basic use : training and testing

print('basic example of utilisation :')
'''training'''
chi2_list, error_list = User.train(my_NN,
                                   data_train_input,
                                   data_train_target,
                                   num_epochs=my_num_epochs,
                                   optimizer=Optimizer.SGD(lr=my_lr),
                                   batch_size=my_batch_size)

plt.plot(range(my_num_epochs), chi2_list)
plt.xlabel('Epoch')
plt.ylabel('Mean squared error')
plt.title('Evolution of the training error : basic example')
'''testing '''
data_test_prediction = User.prediction(my_NN, data_test_input)

error = Error_round.error_round(data_test_prediction, data_test_target)
print('% of false error for testing set : ', error)
'''histogram of predictions'''
plt.figure()
S = data_test_prediction[data_test_target == 1]
B = data_test_prediction[data_test_target == 0]