Example #1
 def optimize(self, nnet):
     timer = Stopwatch(verbose=False).start()
     self.total_epochs += self.max_epochs
     for i in range(self.max_epochs):
         self.epoch += 1
         if self.verbose:
             print_inline('Epoch {0:>{1}}/{2} '.format(
                 self.epoch, len(str(self.total_epochs)),
                 self.total_epochs))
         if self.verbose and self.early_stopping and nnet._X_val is not None:
             print_inline(' early stopping after {0} '.format(
                 self._early_stopping))
         losses = self.train_epoch(nnet)
         self.loss_history.append(losses)
         msg = 'elapsed: {0} sec'.format(
             width_format(timer.elapsed(), default_width=5,
                          max_precision=2))
         msg += ' - loss: {0}'.format(
             width_format(np.mean(losses), default_width=5,
                          max_precision=4))
         score = nnet._metric(nnet._y, nnet.validate())
         self.score_history.append(score)
         # TODO: change acc to metric name
         msg += ' - acc.: {0}'.format(
             width_format(score, default_width=6, max_precision=4))
         if nnet._X_val is not None:
             if self._early_stopping > 0 and self.epoch > 1:
                 self._early_stopping -= 1
             val_loss = nnet._loss(nnet._y_val,
                                   nnet.validate_proba(nnet._X_val))
             self.val_loss_history.append(val_loss)
             val_score = nnet._metric(nnet._y_val,
                                      nnet.validate(nnet._X_val))
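             # apparently a divergence guard: stop training if the validation
             # score drops below 20% of the previously recorded value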
             if self.epoch > 1 and val_score < 0.2 * self.val_score_history[
                     -1]:
                 return
             self.val_score_history.append(val_score)
             if self.epoch > 1 and val_score > nnet.best_val_score_:
                 nnet.best_val_score_ = val_score
                 nnet.best_epoch_ = self.epoch  # TODO move to optimizer
                 nnet._save_best_weights()
                 self._early_stopping = self.early_stopping  # reset counter
             msg += ' - val. loss: {0}'.format(
                 width_format(val_loss, default_width=5, max_precision=4))
             # TODO: fix acc.
             msg += ' - val. acc.: {0}'.format(
                 width_format(val_score, default_width=6, max_precision=4))
             if self._early_stopping == 0:
                 if self.verbose: print(msg)
                 return
         if self.verbose: print(msg)
         if self.epoch > 1 and self.plot:
             if not os.path.exists(self.plot_dirpath):
                 os.makedirs(self.plot_dirpath)
             plot_learning_curves(self.loss_history,
                                  self.score_history,
                                  self.val_loss_history,
                                  self.val_score_history,
                                  dirpath=self.plot_dirpath)
Example #2
def main():
    args = get_args()
    torch.manual_seed(args.seed)

    shape = (224, 224, 3)
    """ define dataloader """
    train_loader, valid_loader, test_loader = make_dataloader(args)
    """ define model architecture """
    model = get_model(args, shape, args.num_classes)

    if torch.cuda.device_count() >= 1:
        print('Model pushed to {} GPU(s), type {}.'.format(
            torch.cuda.device_count(), torch.cuda.get_device_name(0)))
        model = model.cuda()
    else:
        raise ValueError('CPU training is not supported')
    """ define loss criterion """
    criterion = nn.CrossEntropyLoss().cuda()
    """ define optimizer """
    optimizer = make_optimizer(args, model)
    """ define learning rate scheduler """
    scheduler = make_scheduler(args, optimizer)
    """ define trainer, evaluator, result_dictionary """
    result_dict = {
        'args': vars(args),
        'epoch': [],
        'train_loss': [],
        'train_acc': [],
        'val_loss': [],
        'val_acc': [],
        'test_acc': []
    }
    trainer = Trainer(model, criterion, optimizer, scheduler)
    evaluator = Evaluator(model, criterion)

    if args.evaluate:
        """ load model checkpoint """
        model.load()
        result_dict = evaluator.test(test_loader, args, result_dict)
    else:
        evaluator.save(result_dict)
        """ define training loop """
        for epoch in range(args.epochs):
            result_dict['epoch'] = epoch
            result_dict = trainer.train(train_loader, epoch, args, result_dict)
            result_dict = evaluator.evaluate(valid_loader, epoch, args,
                                             result_dict)
            evaluator.save(result_dict)
            plot_learning_curves(result_dict, epoch, args)

        result_dict = evaluator.test(test_loader, args, result_dict)
        evaluator.save(result_dict)
        """ save model checkpoint """
        model.save()

    print(result_dict)
Example #3
    def plot_learning_curves(self, val=True):
        """
        Plot the learning curves.

        @param val: bool
                True if a validation set was used during training, False otherwise.

        @return: None
        """
        utils.plot_learning_curves(self.history,
                                   self.compile_specs['metrics'],
                                   val=val)
        save_file = "{}/{}_{}.ckpt".format(SAVE_DIR, args.name, epoch)
        train_saver.save(sess, save_file)

        # Evaluate on validation set
        model.reuse = True
        sess.run(model.auc_init)
        sess.run(model.acc_init)

        for i, (inputs, targets, target_ids) in enumerate(val_set):
            loss, acc_update, auc_update, summary_loss = sess.run(
                [model.loss, model.accuracy[1], model.auc[1], merged_loss],
                feed_dict={
                    model.inputs: inputs,
                    model.targets: targets,
                    model.target_ids: target_ids})
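
        # `loss` keeps only the final validation batch; accuracy and AUC are
        # accumulated by the [1] update ops above and read out via [0] below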

        accuracy, auc, summary_aucacc = sess.run(
            [model.accuracy[0], model.auc[0], merged_aucacc])
        print("Epoch {},  Loss: {:.3f},  Accuracy: {:.3f},  AUC: {:.3f} (valid)"
              .format(epoch, loss, accuracy, auc))

        valid_writer.add_summary(summary_loss, epoch)
        valid_writer.add_summary(summary_aucacc, epoch)

    train_writer.close()
    valid_writer.close()

    print("Saved model at", save_file)  # training finished

plot_learning_curves(SAVE_DIR, args.epochs)
Example #5
    callbacks = []
    callbacks.append(
        ModelCheckpoint('./checkpoints/{}.h5'.format(model_name),
                        save_best_only=True,
                        save_weights_only=False))

    history = model.fit(
        x=X_train,
        y=Y_train,
        batch_size=batch_size,
        epochs=epochs,
        validation_split=0.2,
        callbacks=callbacks,
    )

    plot_learning_curves(history, model_name + '_transfer1', args.GAN)

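    # fine-tuning stage: freeze every layer except the last five, then
    # recompile with a low-learning-rate SGD optimizer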
    for layer in model.layers[:-5]:
        layer.trainable = False
    for layer in model.layers[-5:]:
        layer.trainable = True

    model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['acc'])

    callbacks = []
    callbacks.append(
        ModelCheckpoint('./checkpoints/{}.h5'.format(model_name),
                        save_best_only=True,
                        save_weights_only=False))
# NOTE: the source snippet is truncated here; the ImageDataGenerator call below
# is reconstructed from context (arguments preceding this point are lost)
datagen = ImageDataGenerator(featurewise_std_normalization=True,
                             rotation_range=45,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             vertical_flip=True,
                             fill_mode='nearest')

datagen.fit(X_train)

callbacks = []
callbacks.append(
    tf.keras.callbacks.ModelCheckpoint('./checkpoints/own_model.h5',
                                       save_best_only=True,
                                       save_weights_only=False))

history = model.fit(
    x=X_train,
    y=Y_train,
    batch_size=batch_size,
    epochs=epochs,
    validation_split=0.2,
    callbacks=callbacks,
)

plot_learning_curves(history, 'own_model', args.GAN)

test_acc(X_dev, Y_dev, figure_size, './checkpoints/own_model.h5')
Example #7
        else:
            pis.append(pi/ct)
    pbar = sum(pis)/len(pis)
    pebar = sum([x**2 for x in pjs])
    print(pbar, pebar)
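    # kappa-style agreement: (observed agreement - chance agreement) / (1 - chance agreement)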
    return (pbar - pebar)/(1 - pebar)


np.random.seed(165)
dataset = 'blood.csv'

#X_train, X_test, y_train, y_test = load_data(dataset, 0.8)
X_train, X_test, y_train, y_test, kappa = load_music('music_train.pkl', 'music_test.pkl', 0.4)

print(kappa)
'''
n_seed = 15
query_budget = 150
reps = 100
ps = [0, 0.3]
log_interval = 10
all_results = np.zeros((reps, len(ps), query_budget - n_seed))

rtup = []
for rep in range(reps):
    for i, p in enumerate(ps):
        rtup.append((rep, i, p))

pool = Pool(mp.cpu_count())
histories = pool.map(run_exp_music, rtup)
Example #8
import numpy as np

from network import NeuralNetwork, Layer
from train import BCE_Loss, train_cv
from utils import plot_decision_boundaries, plot_learning_curves

if __name__ == '__main__':
    data = np.loadtxt('classification2.txt', delimiter=',')
    X = data[:, :2]
    y = data[:, -1:]

    net = NeuralNetwork([
        Layer(2, 2, activation='ReLU'),
        Layer(2, 1, activation='sigmoid'),
    ])

    train_losses, test_losses, accus, nets = train_cv(
        net,
        X,
        y,
        loss=BCE_Loss(),
        lr=0.01,
        n_epochs=1000,
        k=5,
        reset_weights=True,  # new initial (random) weights for each fold
    )

    plot_learning_curves(train_losses, test_losses, accus)

    plot_decision_boundaries(nets, X, y)

    print('Done')
Example #9
def main():
    args = get_args()
    torch.manual_seed(args.seed)

    shape = (224, 224, 3)
    """ define dataloader """
    train_loader, valid_loader, test_loader = make_dataloader(args)
    """ define model architecture """
    model = get_model(args, shape, args.num_classes)

    if torch.cuda.device_count() >= 1:
        print('Model pushed to {} GPU(s), type {}.'.format(
            torch.cuda.device_count(), torch.cuda.get_device_name(0)))
        model = model.cuda()
    else:
        raise ValueError('CPU training is not supported')
    """ define loss criterion """
    criterion = nn.CrossEntropyLoss().cuda()
    """ define optimizer """
    optimizer = make_optimizer(args, model)
    """ define learning rate scheduler """
    scheduler = make_scheduler(args, optimizer)
    """ define loss scaler for automatic mixed precision """
    scaler = torch.cuda.amp.GradScaler()
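    # the scaler is passed to the Trainer below, which presumably wraps the backward
    # pass and optimizer step with scaler.scale(), scaler.step() and scaler.update()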
    """ define trainer, evaluator, result_dictionary """
    result_dict = {
        'args': vars(args),
        'epoch': [],
        'train_loss': [],
        'train_acc': [],
        'val_loss': [],
        'val_acc': [],
        'test_acc': []
    }
    trainer = Trainer(model, criterion, optimizer, scheduler, scaler)
    evaluator = Evaluator(model, criterion)

    train_time_list = []
    valid_time_list = []

    if args.evaluate:
        """ load model checkpoint """
        model.load()
        result_dict = evaluator.test(test_loader, args, result_dict)
    else:
        evaluator.save(result_dict)

        best_val_acc = 0.0
        """ define training loop """
        for epoch in range(args.epochs):
            result_dict['epoch'] = epoch

            torch.cuda.synchronize()
            tic1 = time.time()

            result_dict = trainer.train(train_loader, epoch, args, result_dict)

            torch.cuda.synchronize()
            tic2 = time.time()
            train_time_list.append(tic2 - tic1)

            torch.cuda.synchronize()
            tic3 = time.time()

            result_dict = evaluator.evaluate(valid_loader, epoch, args,
                                             result_dict)

            torch.cuda.synchronize()
            tic4 = time.time()
            valid_time_list.append(tic4 - tic3)

            if result_dict['val_acc'][-1] > best_val_acc:
                print("{} epoch, best epoch was updated! {}%".format(
                    epoch, result_dict['val_acc'][-1]))
                best_val_acc = result_dict['val_acc'][-1]
                model.save(checkpoint_name='best_model')

            evaluator.save(result_dict)
            plot_learning_curves(result_dict, epoch, args)

        result_dict = evaluator.test(test_loader, args, result_dict)
        evaluator.save(result_dict)
        """ calculate test accuracy using best model """
        model.load(checkpoint_name='best_model')
        result_dict = evaluator.test(test_loader, args, result_dict)
        evaluator.save(result_dict)

    print(result_dict)

    np.savetxt(os.path.join(model.checkpoint_dir, model.checkpoint_name,
                            'train_time_amp.csv'),
               train_time_list,
               delimiter=',',
               fmt='%s')
    np.savetxt(os.path.join(model.checkpoint_dir, model.checkpoint_name,
                            'valid_time_amp.csv'),
               valid_time_list,
               delimiter=',',
               fmt='%s')


def main():
    args = get_args()
    torch.manual_seed(args.seed)

    shape = (224, 224, 3)
    """ define dataloader """
    train_loader, valid_loader, test_loader = make_dataloader(args)
    """ define model architecture """
    model = get_model(args, shape, args.num_classes)

    if torch.cuda.device_count() >= 1:
        print('Model pushed to {} GPU(s), type {}.'.format(
            torch.cuda.device_count(), torch.cuda.get_device_name(0)))
        model = model.cuda()
    else:
        raise ValueError('CPU training is not supported')
    """ define loss criterion """
    criterion = nn.CrossEntropyLoss().cuda()
    """ define optimizer """
    optimizer = make_optimizer(args, model)
    """ define learning rate scheduler """
    scheduler = make_scheduler(args, optimizer)
    """ define trainer, evaluator, result_dictionary """
    result_dict = {
        'args': vars(args),
        'epoch': [],
        'train_loss': [],
        'train_acc': [],
        'val_loss': [],
        'val_acc': [],
        'test_acc': []
    }
    trainer = Trainer(model, criterion, optimizer, scheduler)
    evaluator = Evaluator(model, criterion)

    if args.evaluate:
        """ load model checkpoint """
        model.load("best_model")
        result_dict = evaluator.test(test_loader, args, result_dict, True)

        model.load("last_model")
        result_dict = evaluator.test(test_loader, args, result_dict, False)

    else:
        evaluator.save(result_dict)

        best_val_acc = 0.0
        """ define training loop """
        tolerance = 0
        for epoch in range(args.epochs):
            result_dict['epoch'] = epoch
            result_dict = trainer.train(train_loader, epoch, args, result_dict)
            result_dict = evaluator.evaluate(valid_loader, epoch, args,
                                             result_dict)

            tolerance += 1
            print("tolerance: ", tolerance)

            if result_dict['val_acc'][-1] > best_val_acc:
                tolerance = 0
                print("{} epoch, best epoch was updated! {}%".format(
                    epoch, result_dict['val_acc'][-1]))
                best_val_acc = result_dict['val_acc'][-1]
                model.save(checkpoint_name='best_model')

            evaluator.save(result_dict)
            plot_learning_curves(result_dict, epoch, args)

            if tolerance > 20:
                break

        result_dict = evaluator.test(test_loader, args, result_dict, False)
        evaluator.save(result_dict)
        """ save model checkpoint """
        model.save(checkpoint_name='last_model')
        """ calculate test accuracy using best model """
        model.load(checkpoint_name='best_model')
        result_dict = evaluator.test(test_loader, args, result_dict, True)
        evaluator.save(result_dict)

    print(result_dict)
Example #11
            # saving the score for tracking progress
            rewards_by_target_updates.append(total_reward)
            tricky_rewards_by_target_updates.append(total_tricky_reward)

            if step % 2000 == 0:
                plot_best_actions(simple_model,
                                  positions_range,
                                  velocity_range,
                                  max_steps,
                                  marker="step" + str(step))

            simple_model.target_model = target_model

    return rewards_by_target_updates, tricky_rewards_by_target_updates, simple_model


if __name__ == "__main__":
    # reading actions maps
    shutil.rmtree("actions_tracking/", ignore_errors=True)
    os.mkdir("actions_tracking")

    rewards, trewards, simple_model = run()
    plot_learning_curves(rewards, trewards, max_steps, target_update)
    plot_best_actions(simple_model,
                      positions_range,
                      velocity_range,
                      max_steps,
                      marker=None)
    env.close()