Example #1
def load_dataset(dataset_name, cv):
    opt = fake_dataset_opt(dataset_name, cv)
    dataset, val_dataset, tst_dataset = create_trn_val_tst_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    return dataset, val_dataset, tst_dataset
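
Note: `fake_dataset_opt` is not shown on this page. A minimal sketch of what such a helper might return is below; every field is an illustrative guess based on the options used in the other examples (`dataset_mode`, `cvNo`, `serial_batches`, `batch_size`, `isTrain`), not the repository's actual schema.

from types import SimpleNamespace

def fake_dataset_opt(dataset_name, cv):
    # Hypothetical helper: builds a minimal option object for dataset creation.
    # All field names are assumptions, not the repo's real option schema.
    return SimpleNamespace(
        dataset_mode=dataset_name,  # selects the dataset class
        cvNo=cv,                    # cross-validation fold index
        batch_size=128,
        serial_batches=True,        # keep sample order deterministic
        num_threads=0,
        isTrain=False,
    )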
Example #2
def test(opt):

    trn_dataset, val_dataset, tst_dataset = create_trn_val_tst_dataset(opt)

    logger.info('The number of validation samples = %d' % len(val_dataset))
    logger.info('The number of testing samples = %d' % len(tst_dataset))

    model = create_model(opt)  # create a model given opt.model and other options
    model.setup(opt)
    model.cuda()

    # test
    logger.info('Loading model: epoch-%d' % opt.eval_epoch)
    model.load_networks(opt.eval_epoch)
    logger.info('Finish loading model')

    acc = eval(model, val_dataset, tst_dataset, is_save=False, phase='val')
    logger.info('Val result acc %.4f' % (acc))

    acc = eval(model, val_dataset, tst_dataset, is_save=False, phase='test')
    logger.info('Tst result acc %.4f' % (acc))
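
Note: the `eval` helper used above (which shadows the Python built-in, as in the source) is not shown on this page. A hedged sketch of what such a routine might look like follows; `model.test()`, `model.pred`, the `data['label']` field, and the save path are all assumptions about the model and dataset interfaces, not the repository's actual code.

import numpy as np

def eval(model, val_dataset, tst_dataset, is_save=False, phase='val'):
    # Choose the split: 'val' -> validation set, anything else -> test set.
    dataset = val_dataset if phase == 'val' else tst_dataset
    total_pred, total_label = [], []
    for data in dataset:
        model.set_input(data)  # assumed: same interface as in train()
        model.test()           # assumed: forward pass with gradients disabled
        pred = model.pred.argmax(dim=1).detach().cpu().numpy()  # assumed attribute
        total_pred.append(pred)
        total_label.append(np.asarray(data['label']))  # assumed dataset field
    total_pred = np.concatenate(total_pred)
    total_label = np.concatenate(total_label)
    acc = float((total_pred == total_label).mean())
    if is_save:
        np.save('{}_pred.npy'.format(phase), total_pred)  # illustrative path only
    return acc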
Example #3
def train(opt):

    trn_dataset, val_dataset, tst_dataset = create_trn_val_tst_dataset(opt)
    dataset_size = len(trn_dataset)
    logger.info('The number of training samples = %d' % dataset_size)
    writer = SummaryWriter()

    model = create_model(opt)  # create a model given opt.model and other options
    model.setup(opt)
    model.cuda()

    best_eval_acc = 0  # record the best eval accuracy
    total_iters = 0  # the total number of training iterations
    best_eval_epoch = -1  # record the best eval epoch

    for epoch in range(1, opt.niter + opt.niter_decay + 1):  # outer loop over epochs
        epoch_start_time = time.time()  # timer for entire epoch
        iter_data_time = time.time()  # timer for data loading per iteration
        epoch_iter = 0  # the number of training iterations in current epoch, reset to 0 every epoch

        for i, data in enumerate(trn_dataset):  # inner loop within one epoch
            iter_start_time = time.time()  # timer for computation per iteration
            total_iters += 1  # counts iterations (not samples)
            epoch_iter += opt.batch_size
            model.set_input(data)  # unpack data from dataset and apply preprocessing
            model.optimize_parameters(epoch)  # compute losses, backpropagate, update network weights

            if total_iters % opt.print_freq == 0:  # print training losses and save logging information to the disk
                losses = model.get_current_losses()
                t_comp = (time.time() - iter_start_time) / opt.batch_size
                losses_str = ' '.join('{}:{:.4f}'.format(name, losses[name])
                                      for name in model.loss_names)
                logger.info('Cur epoch {} loss {}'.format(epoch, losses_str))

                writer.add_scalars('training_loss', dict(losses), total_iters)

            if total_iters % opt.save_latest_freq == 0:  # cache our latest model every <save_latest_freq> iterations
                logger.info(
                    'saving the latest model (epoch %d, total_iters %d)' %
                    (epoch, total_iters))
                save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
                model.save_networks(save_suffix)

            iter_data_time = time.time()

        if epoch % opt.save_epoch_freq == 0:  # cache our model every <save_epoch_freq> epochs
            logger.info('saving the model at the end of epoch %d, iters %d' %
                        (epoch, total_iters))
            model.save_networks('latest')
            model.save_networks(epoch)

        logger.info('End of training epoch %d / %d \t Time Taken: %d sec' %
                    (epoch, opt.niter + opt.niter_decay,
                     time.time() - epoch_start_time))

    model.update_learning_rate(logger)  # update learning rates at the end of every epoch

        acc = eval(model, val_dataset, tst_dataset)

        logger.info('Val result of epoch %d / %d acc %.4f ' %
                    (epoch, opt.niter + opt.niter_decay, acc))

        if acc > best_eval_acc:
            best_eval_epoch = epoch
            best_eval_acc = acc

    writer.close()

    # print best eval result
    logger.info('Best eval epoch %d found with acc %.4f' %
                (best_eval_epoch, best_eval_acc))

    # test
    logger.info('Loading best model found on val set: epoch-%d' %
                best_eval_epoch)
    model.load_networks(best_eval_epoch)

    acc = eval(model, val_dataset, tst_dataset, is_save=True, phase='test')
    logger.info('Tst result acc %.4f' % (acc))

    clean_chekpoints(opt.name, best_eval_epoch)
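
Note: `clean_chekpoints` (spelling kept as in the source) is not shown on this page. It presumably deletes the per-epoch checkpoints other than the best one; a sketch under that assumption, with a hypothetical `checkpoints/<name>/<epoch>_net_*.pth` layout mirroring the `checkpoints/...` paths in Example #4:

import os

def clean_chekpoints(expr_name, store_epoch, root='checkpoints'):
    # Hypothetical layout: checkpoints/<expr_name>/<epoch>_net_<name>.pth
    ckpt_dir = os.path.join(root, expr_name)
    for fname in os.listdir(ckpt_dir):
        # Keep the best epoch's weights and the 'latest' snapshots; drop the rest.
        if fname.endswith('.pth') \
                and not fname.startswith(str(store_epoch) + '_') \
                and not fname.startswith('latest'):
            os.remove(os.path.join(ckpt_dir, fname))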
Example #4
if __name__ == '__main__':
    teacher_path = 'checkpoints/ef_AVL_Adnn512,256,128_Vlstm128_maxpool_Lcnn128_fusion256,128run1/'
    # teacher_path = 'checkpoints/new_0416_simpleAE_ce_t1.0_f1.0_mse0.1_cycle0.1_run1'
    opt_path = os.path.join(teacher_path, 'train_opt.conf')
    opt = load_from_opt_record(opt_path)
    opt.isTrain = False  # teacher model should be in test mode
    opt.gpu_ids = [0]
    opt.serial_batches = True
    opt.dataset_mode = 'iemocap_miss'
    setattr(opt, 'miss_num', 'mix')
    modality = 'L'
    for cv in range(1, 11):
        opt.cvNo = cv
        teacher_path_cv = os.path.join(teacher_path, str(cv))
        dataset, val_dataset, tst_dataset = create_trn_val_tst_dataset(opt)  # create a dataset given opt.dataset_mode and other options
        # model = MultiFusionMultiModel(opt)
        # model = NewTranslationModel(opt)
        model = EarlyFusionMultiModel(opt)
        model.cuda()
        model.load_networks_cv(teacher_path_cv)
        # extractor = MultiLayerFeatureExtractor(model, 'netC.module[4]')
        # save_root = 'analysis/teacher_feats/{}/'.format(modality) + str(cv)
        # if not os.path.exists(save_root):
        #     os.makedirs(save_root)

        # extract(model, dataset, save_root, phase='trn', modality=modality)
        # extract(model, val_dataset, save_root, phase='val', modality=modality)
        # extract(model, tst_dataset, save_root, phase='tst', modality=modality)

        save_root = '/data2/lrc/Iemocap_feature/early_fusion_reps_mix'
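
Note: `load_from_opt_record` is not shown on this page. One plausible implementation, assuming `train_opt.conf` stores the saved options as a flat JSON dict of name -> value:

import json
from types import SimpleNamespace

def load_from_opt_record(file_path):
    # Assumption: the opt record is flat JSON; a namespace allows the
    # attribute-style access and assignment used in Example #4.
    with open(file_path) as f:
        opt_record = json.load(f)
    return SimpleNamespace(**opt_record)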