Example #1
def train_and_evaluate(model, optimizer, scheduler, loss_fn, reg_fn,
                       dataloaders, params):
    dl_label = dataloaders['label']
    dl_unlabel = dataloaders['unlabel']
    dl_val = dataloaders['val']
    dl_test = dataloaders['test']

    # training steps
    is_best = False
    best_val_score = -float('inf')
    best_test_score = -float('inf')
    plot_history = {'val_acc': [], 'test_acc': []}
    for step in tqdm(range(params.n_iters)):
        if step >= params.decay_iter:
            scheduler.step()
        nll, vat = train_single_iter(model, optimizer, loss_fn, reg_fn,
                                     dl_label, dl_unlabel, params)
        # report logs for each iter (mini-batch)
        logging.info(
            "Iteration {}/{} ; LOSS {:05.3f} ; NLL {:05.3f} ; VAT {:05.3f}".
            format(step + 1, params.n_iters, nll + vat, nll, vat))
        if (step + 1) % params.n_summary_steps == 0:
            val_score = evaluate(model, dl_val, params)
            test_score = evaluate(model, dl_test, params)
            plot_history['val_acc'].append(val_score)
            plot_history['test_acc'].append(test_score)
            logging.info("Val_score {:05.3f} ; Test_score {:05.3f}".format(
                val_score, test_score))
            is_best = val_score > best_val_score
            if is_best:
                best_val_score = val_score
                best_test_score = test_score
                logging.info("Found new best accuray")
            print('[{}] Val score was {}'.format(step + 1, val_score))
            print('[{}] Test score was {}'.format(step + 1, test_score))
    print('Best val score was {}'.format(best_val_score))
    print('Best test score was {}'.format(best_test_score))

    # Store results
    results = {
        'Best val score': best_val_score,
        'Best test score': best_test_score
    }
    utils.save_dict_to_json(results,
                            os.path.join(args.model_dir, 'results.json'))
    utils.plot_training_results(args.model_dir, plot_history)
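
Every example on this page passes a results dictionary to utils.plot_training_results, but the helper itself is not reproduced here. The following is only a minimal sketch consistent with the call in Example #1 (a model directory plus a dict mapping metric names to lists collected at each summary step); it is an assumption, not the original utils module.

# Sketch only (assumed helper, not the original utils module): plot every
# metric series in plot_history and save the figure inside model_dir.
import os
import matplotlib.pyplot as plt

def plot_training_results(model_dir, plot_history):
    plt.figure()
    for name, values in plot_history.items():
        plt.plot(range(1, len(values) + 1), values, label=name)
    plt.xlabel('summary step')
    plt.ylabel('metric value')
    plt.legend()
    plt.savefig(os.path.join(model_dir, 'training_results.png'))
    plt.close()
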
Example #2
    def _save_history(self, history, save_path):
        if not os.path.isdir(save_path):
            os.makedirs(save_path)
        # for i, h in enumerate(zip(*history)):
        #     filename = os.path.join(save_path, 'history_class{}.json'.format(i))
        #     with open(filename, 'w') as f:
        #         json.dump(h, f)
        plot_history = {
            'test_acc': [i[0] for i in history],
            'test_f1': [i[1] for i in history]
        }
        utils.plot_training_results(save_path, plot_history)
        with open(os.path.join(save_path, 'plot_history.pkl'), 'wb') as f:
            pickle.dump(plot_history, f)

        output = "Accuracy: {} \n F1 Score: {}".format(self.best_test_acc, self.best_f1_score)
        with open(os.path.join(save_path, 'results.txt'), 'w') as f:
            f.write(output)
Example #3
import utils
import trainingFunctions
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from resnet_v1 import ResNet_v1

training_loss = None
if LOSS == 'SCCE':
    training_loss = SparseCategoricalCrossentropy()

training_opti = None
if OPTI == 'ADAM':
    training_opti = Adam()

model = ResNet_v1.build(width=IMG_WIDTH,
                        height=IMG_HEIGHT,
                        depth=IMG_DEPTH,
                        classes=NB_CLASSES,
                        stages=STAGES,
                        filters=FILTERS,
                        se=SE_MODULES)

model.compile(
    optimizer=training_opti,
    loss=training_loss,
    metrics=['accuracy'],
)

history = trainingFunctions.training_augmented(model, EPOCHS, SEED)
utils.plot_training_results(history, EPOCHS, model, save=True)
utils.log_training_results(history, model)
trainingFunctions.increment_training_cpt()
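
Note that this snippet calls plot_training_results with a Keras History object, the epoch count, and the model, unlike the (model_dir, plot_history) signature used in the PyTorch examples. A minimal sketch compatible with this call might look as follows; the output file name and the use of history.history['accuracy'] / ['loss'] are assumptions.

# Sketch only (assumed): a Keras-flavoured plot_training_results matching the
# call above, where history is the History object returned by model.fit.
import matplotlib.pyplot as plt

def plot_training_results(history, epochs, model, save=False):
    acc = history.history['accuracy']
    loss = history.history['loss']
    steps = range(1, len(acc) + 1)
    plt.figure()
    plt.plot(steps, acc, label='accuracy')
    plt.plot(steps, loss, label='loss')
    plt.xlabel('epoch')
    plt.legend()
    if save:
        plt.savefig('{}_training_results.png'.format(model.name))
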
Example #4
def train_and_evaluate(model,
                       meta_train_classes,
                       meta_val_classes,
                       meta_test_classes,
                       task_type,
                       optimizer,
                       scheduler,
                       loss_fn,
                       metrics,
                       params,
                       model_dir,
                       restore_file=None):
    """
    Train the model and evaluate every `save_summary_steps`.

    Args:
        model: TPN model
        meta_train_classes: (list) the classes for meta-training
        meta_val_classes: (list) the classes for meta-validating
        meta_test_classes: (list) the classes for meta-testing
        task_type: (subclass of FewShotTask) a type for generating tasks
        optimizer: (torch.optim) optimizer for parameters of model
        scheduler: (torch.optim.lr_scheduler) scheduler for decaying learning rate
        loss_fn: a loss function
        metrics: (dict) a dictionary of functions that compute a metric using 
                 the output and labels of each batch
        params: (Params) hyperparameters
        model_dir: (string) directory containing config, weights and log
        restore_file: (string) optional- name of file to restore from
                      (without its extension .pth.tar)
    """
    # reload weights from restore_file if specified
    if restore_file is not None:
        restore_path = os.path.join(args.model_dir,
                                    args.restore_file + '.pth.tar')
        logging.info("Restoring parameters from {}".format(restore_path))
        utils.load_checkpoint(restore_path, model, optimizer)

    # params information
    num_classes = params.num_classes
    num_samples = params.num_samples
    num_query = params.num_query

    # validation accuracy
    best_val_loss = float('inf')

    # For plotting a summarized view of the training procedure
    plot_history = {
        'train_loss': [],
        'train_acc': [],
        'val_loss': [],
        'val_acc': [],
        'test_loss': [],
        'test_acc': []
    }

    with tqdm(total=params.num_episodes) as t:
        for episode in range(params.num_episodes):
            # Run one episode
            logging.info("Episode {}/{}".format(episode + 1,
                                                params.num_episodes))
            scheduler.step()

            # Train a model on a single task (episode).
            # TODO meta-batch of tasks
            task = task_type(meta_train_classes, num_classes, num_samples,
                             num_query)
            dataloaders = fetch_dataloaders(['train', 'test'], task)

            _ = train_single_task(model, optimizer, loss_fn, dataloaders,
                                  metrics, params)
            # print(episode, _)

            # Evaluate on train, val, test dataset given a number of tasks (params.num_steps)
            if (episode + 1) % params.save_summary_steps == 0:
                train_metrics = evaluate(model, loss_fn, meta_train_classes,
                                         task_type, metrics, params, 'train')
                val_metrics = evaluate(model, loss_fn, meta_val_classes,
                                       task_type, metrics, params, 'val')
                test_metrics = evaluate(model, loss_fn, meta_test_classes,
                                        task_type, metrics, params, 'test')

                train_loss = train_metrics['loss']
                val_loss = val_metrics['loss']
                test_loss = test_metrics['loss']
                train_acc = train_metrics['accuracy']
                val_acc = val_metrics['accuracy']
                test_acc = test_metrics['accuracy']

                is_best = val_loss <= best_val_loss

                # Save weights
                utils.save_checkpoint({
                    'episode': episode + 1,
                    'state_dict': model.state_dict(),
                    'optim_dict': optimizer.state_dict()
                },
                                      is_best=is_best,
                                      checkpoint=model_dir)

                # If best_test, best_save_path
                if is_best:
                    logging.info("- Found new best accuracy")
                    best_val_loss = val_loss

                    # Save best test metrics in a json file in the model directory
                    best_train_json_path = os.path.join(
                        model_dir, "metrics_train_best_weights.json")
                    utils.save_dict_to_json(train_metrics,
                                            best_train_json_path)
                    best_val_json_path = os.path.join(
                        model_dir, "metrics_val_best_weights.json")
                    utils.save_dict_to_json(val_metrics, best_val_json_path)
                    best_test_json_path = os.path.join(
                        model_dir, "metrics_test_best_weights.json")
                    utils.save_dict_to_json(test_metrics, best_test_json_path)

                # Save latest test metrics in a json file in the model directory
                last_train_json_path = os.path.join(
                    model_dir, "metrics_train_last_weights.json")
                utils.save_dict_to_json(train_metrics, last_train_json_path)
                last_val_json_path = os.path.join(
                    model_dir, "metrics_val_last_weights.json")
                utils.save_dict_to_json(val_metrics, last_val_json_path)
                last_test_json_path = os.path.join(
                    model_dir, "metrics_test_last_weights.json")
                utils.save_dict_to_json(test_metrics, last_test_json_path)

                plot_history['train_loss'].append(train_loss)
                plot_history['train_acc'].append(train_acc)
                plot_history['val_loss'].append(val_loss)
                plot_history['val_acc'].append(val_acc)
                plot_history['test_loss'].append(test_loss)
                plot_history['test_acc'].append(test_acc)
                utils.plot_training_results(model_dir, plot_history)

                t.set_postfix(
                    tr_acc='{:05.3f}'.format(train_acc),
                    te_acc='{:05.3f}'.format(test_acc),
                    tr_loss='{:05.3f}'.format(train_loss),
                    te_loss='{:05.3f}'.format(test_loss))
                print('\n')

            t.update()
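
Examples #1, #4 and #5 also rely on utils.save_dict_to_json for persisting the metric dictionaries. A minimal sketch consistent with those calls is shown below; casting values to float (so numpy scalars serialize) is an assumption about the helper, not code taken from the repository.

# Sketch only (assumed): dump a metrics dict to a JSON file.
import json

def save_dict_to_json(d, json_path):
    with open(json_path, 'w') as f:
        json.dump({k: float(v) for k, v in d.items()}, f, indent=4)
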
Example #5
def train_and_evaluate(model,
                       meta_train_classes,
                       meta_test_classes,
                       task_type,
                       meta_optimizer,
                       loss_fn,
                       metrics,
                       params,
                       model_dir,
                       restore_file=None):
    """
    Train the model and evaluate every `save_summary_steps`.

    Args:
        model: (MetaLearner) a meta-learner for MAML algorithm
        meta_train_classes: (list) the classes for meta-training
        meta_test_classes: (list) the classes for meta-testing
        task_type: (subclass of FewShotTask) a type for generating tasks
        meta_optimizer: (torch.optim) a meta-optimizer for the MetaLearner
        loss_fn: a loss function
        metrics: (dict) a dictionary of functions that compute a metric using 
                 the output and labels of each batch
        params: (Params) hyperparameters
        model_dir: (string) directory containing config, weights and log
        restore_file: (string) optional- name of file to restore from
                      (without its extension .pth.tar)
    TODO Validation classes
    """
    # reload weights from restore_file if specified
    if restore_file is not None:
        restore_path = os.path.join(args.model_dir,
                                    args.restore_file + '.pth.tar')
        logging.info("Restoring parameters from {}".format(restore_path))
        utils.load_checkpoint(restore_path, model, meta_optimizer)

    # params information
    num_classes = params.num_classes
    num_samples = params.num_samples
    num_query = params.num_query
    num_inner_tasks = params.num_inner_tasks
    meta_lr = params.meta_lr

    # TODO validation accuracy
    best_test_acc = 0.0

    # For plotting a summarized view of the training procedure
    plot_history = {
        'train_loss': [],
        'train_acc': [],
        'test_loss': [],
        'test_acc': []
    }

    with tqdm(total=params.num_episodes) as t:
        for episode in range(params.num_episodes):
            # Run one episode
            logging.info("Episode {}/{}".format(episode + 1,
                                                params.num_episodes))

            # Run inner loops to get adapted parameters (theta_t`)
            adapted_state_dicts = []
            dataloaders_list = []
            for n_task in range(num_inner_tasks):
                task = task_type(meta_train_classes, num_classes, num_samples,
                                 num_query)
                dataloaders = fetch_dataloaders(['train', 'test', 'meta'],
                                                task)
                # Perform a gradient descent to meta-learner on the task
                a_dict = train_single_task(model, loss_fn, dataloaders, params)
                # Store adapted parameters
                # Store dataloaders for meta-update and evaluation
                adapted_state_dicts.append(a_dict)
                dataloaders_list.append(dataloaders)

            # Update the parameters of meta-learner
            # Compute losses with adapted parameters along with corresponding tasks
            # Updated the parameters of meta-learner using sum of the losses
            meta_loss = 0
            for n_task in range(num_inner_tasks):
                dataloaders = dataloaders_list[n_task]
                dl_meta = dataloaders['meta']
                X_meta, Y_meta = next(iter(dl_meta))
                if params.cuda:
                    X_meta = X_meta.cuda(non_blocking=True)
                    Y_meta = Y_meta.cuda(non_blocking=True)

                a_dict = adapted_state_dicts[n_task]
                Y_meta_hat = model(X_meta, a_dict)
                loss_t = loss_fn(Y_meta_hat, Y_meta)
                meta_loss += loss_t
            meta_loss /= float(num_inner_tasks)
            # print(meta_loss.item())

            # Meta-update using meta_optimizer
            meta_optimizer.zero_grad()
            meta_loss.backward()
            meta_optimizer.step()
            # print(model.task_lr.values())

            # Evaluate model on new task
            # Evaluate on train and test dataset given a number of tasks (params.num_steps)
            if (episode + 1) % params.save_summary_steps == 0:
                train_metrics = evaluate(model, loss_fn, meta_train_classes,
                                         task_type, metrics, params, 'train')
                test_metrics = evaluate(model, loss_fn, meta_test_classes,
                                        task_type, metrics, params, 'test')

                train_loss = train_metrics['loss']
                test_loss = test_metrics['loss']
                train_acc = train_metrics['accuracy']
                test_acc = test_metrics['accuracy']

                is_best = test_acc >= best_test_acc

                # Save weights
                utils.save_checkpoint(
                    {
                        'episode': episode + 1,
                        'state_dict': model.state_dict(),
                        'optim_dict': meta_optimizer.state_dict()
                    },
                    is_best=is_best,
                    checkpoint=model_dir)

                # If best_test, best_save_path
                if is_best:
                    logging.info("- Found new best accuracy")
                    best_test_acc = test_acc

                    # Save best test metrics in a json file in the model directory
                    best_train_json_path = os.path.join(
                        model_dir, "metrics_train_best_weights.json")
                    utils.save_dict_to_json(train_metrics,
                                            best_train_json_path)
                    best_test_json_path = os.path.join(
                        model_dir, "metrics_test_best_weights.json")
                    utils.save_dict_to_json(test_metrics, best_test_json_path)

                # Save latest test metrics in a json file in the model directory
                last_train_json_path = os.path.join(
                    model_dir, "metrics_train_last_weights.json")
                utils.save_dict_to_json(train_metrics, last_train_json_path)
                last_test_json_path = os.path.join(
                    model_dir, "metrics_test_last_weights.json")
                utils.save_dict_to_json(test_metrics, last_test_json_path)

                plot_history['train_loss'].append(train_loss)
                plot_history['train_acc'].append(train_acc)
                plot_history['test_loss'].append(test_loss)
                plot_history['test_acc'].append(test_acc)

                t.set_postfix(tr_acc='{:05.3f}'.format(train_acc),
                              te_acc='{:05.3f}'.format(test_acc),
                              tr_loss='{:05.3f}'.format(train_loss),
                              te_loss='{:05.3f}'.format(test_loss))
                print('\n')

            t.update()

    utils.plot_training_results(model_dir, plot_history)
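
Both meta-learning examples call utils.save_checkpoint with a state dict, an is_best flag and a checkpoint directory. The sketch below is one plausible implementation of that interface; the base file names last.pth.tar / best.pth.tar are assumptions (only the .pth.tar extension is mentioned in the docstrings above).

# Sketch only (assumed): write the latest checkpoint and copy it when it is
# the best one seen so far.
import os
import shutil
import torch

def save_checkpoint(state, is_best, checkpoint):
    if not os.path.isdir(checkpoint):
        os.makedirs(checkpoint)
    filepath = os.path.join(checkpoint, 'last.pth.tar')
    torch.save(state, filepath)
    if is_best:
        shutil.copyfile(filepath, os.path.join(checkpoint, 'best.pth.tar'))
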
Example #6
    # Checkpoint
    best_checkpoint = ModelCheckpoint(filepath=CHECKPOINTS_PATH,
                                      monitor='accuracy',
                                      save_weights_only=True,
                                      save_best_only=True,
                                      save_freq=training_steps * 5,
                                      verbose=1,
                                      mode='auto')

    callbacks = [best_checkpoint]

    history = train(train_ds, val_ds, model, args["learning_rate"],
                    args["epochs"], callbacks, class_weights)

    plot_training_results(history, PLOTS_PATH + "pre_fine_tuning_plot.png")

    evaluate_model(test_ds, model, with_cam=False, show_images=False)

    # Fine-tune the last upper layers.
    layers_to_fine_tune = args["fine_tune_layers"]
    model_object.set_fine_tune_layers(layers_to_fine_tune)

    # Retrieve the new model.
    model = model_object.get_model()

    model.summary()

    # Train the model again to fine-tune the last layers. We use a lower learning rate and double the number of
    # epochs, as this session is the most important one for model performance.
    history = train(train_ds, val_ds, model, FINE_TUNING_LR,